Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->reasm_uo);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;

	return ulpq;
}
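
/* Typical usage (a sketch for orientation, not code from this file): the
 * association setup path embeds a ulpq in the association and initializes
 * it in place, roughly:
 *
 *	sctp_ulpq_init(&asoc->ulpq, asoc);
 *
 * The function cannot fail and always returns the ulpq it was handed.
 */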


/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if (event) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, &temp);
	}

	return event_eor;
}

/* Clear the partial delivery mode for this socket.  Drain any events
 * parked in the pd_lobby once it is safe to pass them to the receive
 * queue.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}
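
/* Partial-delivery state is tracked at two levels: sp->pd_mode is an
 * atomic count of how many associations on the socket are currently in
 * partial delivery, while ulpq->pd_mode flags this particular
 * association.  sctp_clear_pd() above decrements the socket-level count
 * and only splices the whole lobby once that count drops to zero.
 */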

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* Add a new event for propagation to the ULP.  */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff_head *queue;
	struct sk_buff *skb;
	int clear_pd = 0;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}
	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */

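	/* Queue selection, summarized (matching the branches below):
	 *
	 *   no association in PD                        -> sk_receive_queue
	 *   this assoc in PD, notification/unfragmented -> pd_lobby
	 *   this assoc in PD, further fragment          -> sk_receive_queue
	 *                                  (MSG_EOR then clears PD)
	 *   another assoc in PD, frag_interleave set    -> sk_receive_queue
	 *   another assoc in PD, otherwise              -> pd_lobby
	 */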
	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	skb_queue_splice_tail_init(skb_list, queue);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		if (!sock_owned_by_user(sk))
			sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a reassembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's fraglist.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						  struct sk_buff_head *queue,
						  struct sk_buff *f_frag,
						  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;
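	/* (The loop above has an empty body on purpose: it only advances
	 * the cursors, leaving 'last' pointing at the tail of frag_list,
	 * or NULL if the frag_list was empty.)
	 */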

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}
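
/* Resulting layout, as a sketch: the first fragment becomes the head skb
 * of the reassembled message and every later fragment hangs off its
 * frag_list, so the whole datagram is delivered as one non-linear skb:
 *
 *	f_frag -> frag_list: [frag 2] -> [frag 3] -> ... -> [l_frag]
 */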


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and 'next_tsn' are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these two fields are
	 * set we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is also a potential to do partial delivery if the user has
	 * set the SCTP_PARTIAL_DELIVERY_POINT option, so count the eligible
	 * bytes here to see if we can do PD.
	 */
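	/* Worked example: if the queue holds TSNs 7 (FIRST_FRAG),
	 * 8 (MIDDLE_FRAG) and 9 (LAST_FRAG), the walk arms first_frag at 7,
	 * advances next_tsn through 8 and 9, and jumps to 'found' at 9,
	 * reassembling 7..9 into a single event.  If 8 were missing, the
	 * LAST_FRAG at 9 would not match next_tsn and first_frag would be
	 * cleared until another FIRST_FRAG re-arms the scan.
	 */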
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (skb_queue_is_first(&ulpq->reasm, pos)) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(asoc->base.net,
							     &ulpq->reasm,
							     pd_first, pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}
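
/* For example, with the reasm queue holding TSNs 5, 6 and 9 and a
 * FORWARD TSN advancing the cumulative point to 7, the walk above frees
 * the fragments at 5 and 6 and stops at 9, which is still deliverable.
 */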

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		struct sk_buff_head temp;

		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		/* Do ordering if needed.  */
		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, &temp);
	}
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *stream;
	__u16 sid, csid, cssn;

	sid = event->stream;
	stream  = &ulpq->asoc->stream;

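	/* The caller queued this event's skb on a temporary sk_buff_head.
	 * skb queues are circular, so the first skb's ->prev points back
	 * at the list head; that head is recovered here so that any newly
	 * ordered events can be appended to the same batch.
	 */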
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	/* We are holding the chunks by stream, by SSN.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		cevent = (struct sctp_ulpevent *) pos->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		csid = cevent->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		cssn = cevent->ssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		/* Have we gone too far?  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		if (csid > sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		/* Have we not gone far enough?  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		if (csid < sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		if (cssn != sctp_ssn_peek(stream, in, sid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		/* Found it, so mark in the stream. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		sctp_ssn_next(stream, in, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		__skb_unlink(pos, &ulpq->lobby);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		/* Attach all gathered skbs to the event.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		__skb_queue_tail(event_list, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) }
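
/* Illustrative sketch (not part of this file): the drain loop above in
 * miniature.  Given a lobby kept sorted by (stream, SSN), delivering one
 * event can unblock a consecutive run of buffered SSNs; we pop entries
 * while each one matches the expected cursor and stop at the first gap.
 * The array and names are hypothetical; the real code walks an sk_buff
 * list and tracks one cursor per stream.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t lobby[] = { 3, 4, 6 };	/* buffered SSNs, sorted */
	uint16_t next_ssn = 3;		/* cursor after SSN 2 was delivered */
	size_t i;

	for (i = 0; i < sizeof(lobby) / sizeof(lobby[0]); i++) {
		if (lobby[i] != next_ssn)
			break;		/* gap: SSN 5 is still missing */
		printf("drain SSN %u\n", (unsigned)lobby[i]);
		next_ssn++;		/* sctp_ssn_next() */
	}
	return 0;
}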
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) /* Helper function to store chunks needing ordering.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 					   struct sctp_ulpevent *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	struct sk_buff *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	struct sctp_ulpevent *cevent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	__u16 sid, csid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	__u16 ssn, cssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	pos = skb_peek_tail(&ulpq->lobby);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (!pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	sid = event->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	ssn = event->ssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	cevent = (struct sctp_ulpevent *) pos->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	csid = cevent->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	cssn = cevent->ssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	if (sid > csid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if ((sid == csid) && SSN_lt(cssn, ssn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	/* Find the right place in this list.  We store them by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	 * stream ID and then by SSN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	skb_queue_walk(&ulpq->lobby, pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		cevent = (struct sctp_ulpevent *) pos->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		csid = cevent->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		cssn = cevent->ssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		if (csid > sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		if (csid == sid && SSN_lt(ssn, cssn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	/* Insert before pos. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) }
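
/* Illustrative sketch (not part of this file): the lobby above stays sorted
 * by stream ID, then by SSN, and SSN comparisons use serial-number
 * arithmetic so the ordering survives the 16-bit SSN wrapping around.  The
 * helper below is a userspace stand-in modelled on the kernel's SSN_lt();
 * the names ssn_lt/main are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

/* "a precedes b" in serial-number space: true when the forward distance
 * from a to b is less than half the 16-bit range.
 */
static int ssn_lt(uint16_t a, uint16_t b)
{
	return (uint16_t)(a - b) > 0x8000;
}

int main(void)
{
	printf("%d\n", ssn_lt(5, 6));		/* 1: 5 precedes 6 */
	printf("%d\n", ssn_lt(65535, 0));	/* 1: survives wraparound */
	printf("%d\n", ssn_lt(6, 5));		/* 0: 6 does not precede 5 */
	return 0;
}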
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 					     struct sctp_ulpevent *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	__u16 sid, ssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	struct sctp_stream *stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	/* Check if this message needs ordering.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	if (event->msg_flags & SCTP_DATA_UNORDERED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		return event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	/* Note: The stream ID must be verified before this routine.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	sid = event->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	ssn = event->ssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	stream  = &ulpq->asoc->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	/* Is this the expected SSN for this stream ID?  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (ssn != sctp_ssn_peek(stream, in, sid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		/* We've received something out of order, so find where it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		 * needs to be placed.  We order by stream and then by SSN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		sctp_ulpq_store_ordered(ulpq, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	/* Mark that the next chunk has been found.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	sctp_ssn_next(stream, in, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	/* Go find any other chunks that were waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 * ordering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	sctp_ulpq_retrieve_ordered(ulpq, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	return event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) }
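
/* Illustrative sketch (not part of this file): the deliver-or-buffer
 * decision made by sctp_ulpq_order(), reduced to one stream with the
 * queues elided.  next_ssn and receive_ordered are hypothetical names;
 * next_ssn plays the role of the sctp_ssn_peek()/sctp_ssn_next() cursor.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t next_ssn;	/* next SSN expected on this stream */

static void receive_ordered(uint16_t ssn)
{
	if (ssn != next_ssn) {
		/* Out of order: the real code parks the event in the
		 * lobby via sctp_ulpq_store_ordered() and returns NULL.
		 */
		printf("SSN %u buffered (expected %u)\n",
		       (unsigned)ssn, (unsigned)next_ssn);
		return;
	}
	/* In order: deliver and advance; the real code then calls
	 * sctp_ulpq_retrieve_ordered() to drain whatever this unblocked.
	 */
	printf("SSN %u delivered\n", (unsigned)ssn);
	next_ssn++;
}

int main(void)
{
	receive_ordered(0);	/* delivered */
	receive_ordered(2);	/* buffered */
	receive_ordered(1);	/* delivered; real code would now drain SSN 2 */
	return 0;
}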
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) /* Helper function to gather skbs that have possibly become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  * ordered by a Forward TSN skipping their dependencies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	struct sk_buff *pos, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct sctp_ulpevent *cevent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	struct sctp_ulpevent *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	struct sctp_stream *stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	struct sk_buff_head temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	struct sk_buff_head *lobby = &ulpq->lobby;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	__u16 csid, cssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	stream = &ulpq->asoc->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	/* We are holding the chunks by stream, by SSN.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	skb_queue_head_init(&temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	event = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	sctp_skb_for_each(pos, lobby, tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		cevent = (struct sctp_ulpevent *) pos->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		csid = cevent->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		cssn = cevent->ssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		/* Have we gone too far?  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		if (csid > sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		/* Have we not gone far enough?  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		if (csid < sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		/* See if this SSN has been marked by skipping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		__skb_unlink(pos, lobby);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			/* Create a temporary list to collect chunks on.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			event = sctp_skb2event(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		/* Attach all gathered skbs to the event.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		__skb_queue_tail(&temp, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	/* If we didn't reap any data, see if the next expected SSN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	 * is next on the queue and if so, use that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	if (event == NULL && pos != (struct sk_buff *)lobby) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		cevent = (struct sctp_ulpevent *) pos->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		csid = cevent->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		cssn = cevent->ssn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			sctp_ssn_next(stream, in, csid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			__skb_unlink(pos, lobby);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			__skb_queue_tail(&temp, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 			event = sctp_skb2event(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	 * the very first SKB on the 'temp' list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		/* See if we have more ordered data that we can deliver. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		sctp_ulpq_retrieve_ordered(ulpq, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		sctp_ulpq_tail_event(ulpq, &temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) /* Skip over an SSN. This is used during the processing of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  * Forward TSN chunk to skip over the abandoned ordered data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	struct sctp_stream *stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	/* Note: The stream ID must be verified before this routine.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	stream  = &ulpq->asoc->stream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	/* Is this an old SSN?  If so, ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	/* Mark that we are no longer expecting this SSN or lower. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	sctp_ssn_skip(stream, in, sid, ssn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	/* Go find any other chunks that were waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	 * ordering and deliver them if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	sctp_ulpq_reap_ordered(ulpq, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) }
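
/* Illustrative sketch (not part of this file): what "skipping" means.  A
 * Forward TSN covering (sid, ssn) moves the stream's expected-SSN cursor
 * past every abandoned message up to and including ssn, so later arrivals
 * are no longer blocked behind data that will never arrive.  next_ssn and
 * skip_to are hypothetical stand-ins for sctp_ssn_peek()/sctp_ssn_skip().
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t next_ssn;

static void skip_to(uint16_t ssn)
{
	/* Ignore old SSNs, mirroring the SSN_lt() check above. */
	if ((uint16_t)(ssn - next_ssn) > 0x8000)
		return;
	next_ssn = ssn + 1;	/* no longer expecting ssn or lower */
}

int main(void)
{
	skip_to(4);	/* expected SSN becomes 5 */
	printf("expecting %u\n", (unsigned)next_ssn);
	skip_to(2);	/* old: ignored, still expecting 5 */
	printf("expecting %u\n", (unsigned)next_ssn);
	return 0;
}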
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
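/* Renege events from 'list', newest first, until at least 'needed' bytes
 * have been freed, never reneging at or below the Cumulative TSN ACK
 * Point.  Returns the number of bytes actually freed.
 */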
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			    __u16 needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	__u16 freed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	__u32 tsn, last_tsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	struct sk_buff *skb, *flist, *last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	struct sctp_ulpevent *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	struct sctp_tsnmap *tsnmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	tsnmap = &ulpq->asoc->peer.tsn_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	while ((skb = skb_peek_tail(list)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		event = sctp_skb2event(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		tsn = event->tsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		/* Don't renege below the Cumulative TSN ACK Point. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		/* Events in ordering queue may have multiple fragments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		 * corresponding to additional TSNs.  Sum the total
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		 * freed space; find the last TSN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		freed += skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		flist = skb_shinfo(skb)->frag_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		for (last = flist; flist; flist = flist->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			last = flist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			freed += skb_headlen(last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		if (last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 			last_tsn = sctp_skb2event(last)->tsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			last_tsn = tsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		/* Unlink the event, then renege all applicable TSNs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		__skb_unlink(skb, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		sctp_ulpevent_free(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		while (TSN_lte(tsn, last_tsn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			sctp_tsnmap_renege(tsnmap, tsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			tsn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		if (freed >= needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			return freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	return freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
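
/* Illustrative sketch (not part of this file): the per-event accounting in
 * the loop above.  A partially reassembled event is one head skb plus a
 * frag_list of follow-on fragments: the bytes freed are the sum of the
 * head lengths, and every TSN from the first fragment through the last is
 * reneged from the tsnmap.  The struct and names below are hypothetical
 * simplifications; the real loop compares TSNs with TSN_lte() so it also
 * survives 32-bit wraparound.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct frag {
	uint32_t tsn;		/* TSN carried by this fragment */
	size_t len;		/* payload bytes (skb_headlen) */
	struct frag *next;	/* frag_list linkage */
};

static size_t renege_event(const struct frag *head)
{
	const struct frag *f, *last = head;
	size_t freed = 0;
	uint32_t tsn;

	for (f = head; f; f = f->next) {	/* head plus frag_list */
		freed += f->len;
		last = f;
	}
	for (tsn = head->tsn; tsn <= last->tsn; tsn++)
		printf("renege TSN %u\n", (unsigned)tsn);	/* sctp_tsnmap_renege() */
	return freed;
}

int main(void)
{
	struct frag c = { 12, 100, NULL };
	struct frag b = { 11, 500, &c };
	struct frag a = { 10, 500, &b };

	printf("freed %zu bytes\n", renege_event(&a));	/* 1100 bytes, TSNs 10..12 */
	return 0;
}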
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* Renege 'needed' bytes from the ordering queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /* Renege 'needed' bytes from the reassembly queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /* Partially deliver the first message when there is pressure on the rwnd. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	struct sctp_ulpevent *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	struct sctp_association *asoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	struct sctp_sock *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	__u32 ctsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	asoc = ulpq->asoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	sp = sctp_sk(asoc->base.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	/* If the association is already in Partial Delivery mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	 * we have nothing to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (ulpq->pd_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	/* Data must be at or below the Cumulative TSN ACK Point to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 * start partial delivery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	skb = skb_peek(&asoc->ulpq.reasm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (skb != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		ctsn = sctp_skb2event(skb)->tsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	/* If the user has enabled the fragment interleave socket option,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	 * multiple associations can enter partial delivery at the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	 * time.  Otherwise, we can enter partial delivery only if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	 * socket is not already in partial delivery mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		/* Is partial delivery possible?  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		event = sctp_ulpq_retrieve_first(ulpq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		/* Send the event to the ULP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		if (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			struct sk_buff_head temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			skb_queue_head_init(&temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			__skb_queue_tail(&temp, sctp_event2skb(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			sctp_ulpq_tail_event(ulpq, &temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			sctp_ulpq_set_pd(ulpq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
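
/* Illustrative sketch (not part of this file): the gate checked above
 * before partial delivery may start.  With SCTP_FRAGMENT_INTERLEAVE
 * enabled, each association tracks partial-delivery state on its own, so
 * several may be in it at once; without it, partial delivery is
 * serialized through one socket-wide counter.  The names below are
 * hypothetical stand-ins for sp->frag_interleave and sp->pd_mode.
 */
#include <stdbool.h>
#include <stdio.h>

static bool may_start_pd(bool frag_interleave, int socket_pd_users)
{
	return frag_interleave || socket_pd_users == 0;
}

int main(void)
{
	printf("%d\n", may_start_pd(false, 0));	/* 1: socket is idle */
	printf("%d\n", may_start_pd(false, 1));	/* 0: another assoc is in PD */
	printf("%d\n", may_start_pd(true, 1));	/* 1: interleave allows it */
	return 0;
}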
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /* Renege some packets to make room for an incoming chunk.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		      gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	struct sctp_association *asoc = ulpq->asoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	__u32 freed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	__u16 needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	needed = ntohs(chunk->chunk_hdr->length) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		 sizeof(struct sctp_data_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		freed = sctp_ulpq_renege_order(ulpq, needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		if (freed < needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	/* If able to free enough room, accept this chunk. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	    freed >= needed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		 * Enter partial delivery if chunk has not been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		 * delivered; otherwise, drain the reassembly queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		if (retval <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			sctp_ulpq_partial_delivery(ulpq, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		else if (retval == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			sctp_ulpq_reasm_drain(ulpq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	sk_mem_reclaim(asoc->base.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
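
/* Illustrative sketch (not part of this file): how 'needed' is computed
 * above.  chunk_hdr->length is the network-order length of the whole DATA
 * chunk, so the payload size is that length minus the DATA chunk header.
 * The 16-byte header matches sizeof(struct sctp_data_chunk) (RFC 4960:
 * 4-byte chunk header plus TSN, stream ID, SSN and PPID); the names here
 * are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define DATA_CHUNK_HDR_LEN 16	/* sizeof(struct sctp_data_chunk) */

int main(void)
{
	uint16_t wire_len = htons(1216);	/* length field as seen on the wire */
	unsigned int needed = ntohs(wire_len) - DATA_CHUNK_HDR_LEN;

	printf("need to free %u bytes\n", needed);	/* 1200 */
	return 0;
}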
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* Notify the application if an association is aborted and in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  * partial delivery mode.  Send up any pending received messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	struct sctp_ulpevent *ev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	struct sctp_sock *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	if (!ulpq->pd_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	sk = ulpq->asoc->base.sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	sp = sctp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 				       SCTP_PARTIAL_DELIVERY_EVENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 					      SCTP_PARTIAL_DELIVERY_ABORTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 					      0, 0, 0, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	if (ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	/* If there is data waiting, send it up the socket now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		sp->data_ready_signalled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		sk->sk_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }