Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2)  * net/tipc/msg.c: TIPC message header routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (c) 2005, 2010-2011, Wind River Systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * modification, are permitted provided that the following conditions are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * 1. Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *    notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * 2. Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *    notice, this list of conditions and the following disclaimer in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  *    documentation and/or other materials provided with the distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * 3. Neither the names of the copyright holders nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  *    contributors may be used to endorse or promote products derived from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  *    this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * Alternatively, this software may be distributed under the terms of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * GNU General Public License ("GPL") version 2 as published by the Free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  * POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #include "core.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #include "msg.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #include "addr.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #include "name_table.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) #include "crypto.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
/* With crypto enabled, reserve extra headroom for the encryption header
 * (EHDR_MAX_SIZE) and account for the AES-GCM authentication tag that is
 * appended after the payload.
 * The "+ 48" slack matches the non-crypto case below — presumably media
 * header margin; TODO confirm.
 */
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_OVERHEAD BUF_HEADROOM
#endif

/* Largest message size whose buffer (including TIPC overhead and the
 * skb_shared_info footer) still fits inside a single page.
 */
const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
/* Round @i up to the next multiple of four (TIPC 32-bit word alignment) */
static unsigned int align(unsigned int i)
{
	unsigned int rounded = i + 3u;

	return rounded & ~3u;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  * tipc_buf_acquire - creates a TIPC message buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63)  * @size: message size (including TIPC header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65)  * Returns a new buffer with data pointers set to the specified size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  * NOTE: Headroom is reserved to allow prepending of a data link header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  *       There may also be unrequested tailroom present at the buffer's end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		skb_reserve(skb, BUF_HEADROOM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 		skb_put(skb, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		skb->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
/**
 * tipc_msg_init - initialize a TIPC message header area in place
 * @own_node: originating node address, recorded as the "previous node"
 * @m: header area to initialize; its first @hsize bytes are zeroed first
 * @user: message user id
 * @type: message type (semantics depend on @user)
 * @hsize: header size to record; also used as the initial total msg size
 * @dnode: destination node address
 *
 * Origin/destination node fields only exist in headers larger than
 * SHORT_H_SIZE, so they are filled in only for those.
 */
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
/**
 * tipc_msg_create - allocate and build a complete TIPC message buffer
 * @user: message user id
 * @type: message type (semantics depend on @user)
 * @hdr_sz: size of the message header
 * @data_sz: size of the data area following the header
 * @dnode: destination node address
 * @onode: originating node address
 * @dport: destination port
 * @oport: originating port
 * @errcode: error code to record in the header
 *
 * Returns a buffer whose header size field covers @hdr_sz + @data_sz,
 * or NULL on allocation failure. The data area is not written here;
 * the caller fills it in.
 */
struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	/* Node fields exist only in long-form headers */
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) /* tipc_buf_append(): Append a buffer to the fragment list of another buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)  * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)  *            out: set when successful non-complete reassembly, otherwise NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)  * @*buf:     in:  the buffer to append. Always defined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)  *            out: head buf after successful complete reassembly, otherwise NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)  * Returns 1 when reassembly complete, otherwise 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)  */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	/* Strip the fragment header; only the payload is merged below */
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		/* Duplicate first fragment while one is already pending */
		if (unlikely(head))
			goto err;
		/* Clear *buf before skb_unshare(), which may consume frag;
		 * the caller must never be left with a dangling pointer.
		 * NOTE(review): if __skb_linearize() fails below, frag looks
		 * leaked (*buf is already NULL on the err path) - verify.
		 */
		*buf = NULL;
		if (skb_has_frag_list(frag) && __skb_linearize(frag))
			goto err;
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		TIPC_SKB_CB(head)->tail = NULL;
		return 0;
	}

	/* Non-first fragment with no reassembly in progress */
	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		/* Chain fragment onto head's frag list; the list tail is
		 * cached in the head buffer's control block
		 */
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		/* Force full re-validation of the reassembled message */
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		/* Hand the completed message back through *buf */
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	/* Drop both buffers (kfree_skb(NULL) is a no-op) and reset state */
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)  * tipc_msg_append(): Append data to tail of an existing buffer queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)  * @_hdr: header to be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  * @m: the data to be appended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)  * @mss: max allowable size of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)  * @dlen: size of data to be appended
 * @txq: queue to append to
 * Returns the number of 1k blocks appended or errno value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)  */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	/* Blocks already accounted for in the queue's current tail buffer */
	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		if (!skb || skb->len >= mss) {
			/* Tail buffer full (or queue empty): start a new
			 * buffer carrying a copy of the prepared header
			 */
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			/* Allocated at full @mss; shrink to header size and
			 * let skb_put() below grow it as data arrives
			 */
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		/* Copy no more than fits in the current buffer */
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	/* Number of blocks added by this call */
	return total - accounted;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) /* tipc_msg_validate - validate basic format of received message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)  * This routine ensures a TIPC message has an acceptable header, and at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)  * as much data as the header indicates it should.  The routine also ensures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)  * that the entire message header is stored in the main fragment of the message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)  * buffer, to simplify future access to message header fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)  * Note: Having extra info present in the message header or data areas is OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)  * TIPC will ignore the excess, under the assumption that it is optional info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)  * introduced by a later release of the protocol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)  */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		/* Buffer is disproportionately large for its payload:
		 * replace it with a tightly sized copy (original freed)
		 */
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	/* Header already passed validation on an earlier call */
	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	/* Minimal header must be available in the linear data area */
	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	/* Pull the full (possibly extended) header into the linear area */
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	/* Claimed size must cover the header, respect the user-data limit,
	 * and not exceed what the buffer actually holds
	 */
	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	/* Cache the verdict so repeated validation is cheap */
	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)  * tipc_msg_fragment - build a fragment skb list for TIPC message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)  * @skb: TIPC message skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)  * @hdr: internal msg header to be put on the top of the fragments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)  * @pktmax: max size of a fragment incl. the header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)  * @frags: returned fragment skb list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)  * Returns 0 if the fragmentation is successful, otherwise: -EINVAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)  * or -ENOMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)  */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	/* Payload capacity of one fragment after its fragment header */
	dmax = pktmax - INT_H_SIZE;
	/* Message must actually require fragmentation, and @pktmax must
	 * leave room for payload
	 */
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	/* NOTE(review): when dsz is an exact multiple of dmax this produces
	 * a final fragment with zero payload (eat == 0) - confirm intended.
	 */
	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	/* Drop everything built so far, leaving @frags empty and usable */
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)  * tipc_msg_build - create buffer chain containing specified header and data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)  * @mhdr: Message header, to be prepended to data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)  * @m: User message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)  * @dsz: Total length of user data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)  * @pktmax: Max packet size that can be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)  * @list: Buffer or chain of buffers to be returned to caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)  * Note that the recursive call we are making here is safe, since it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)  * logically go only one further level down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)  * Returns message data size or errno: -ENOMEM, -EFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 		   int dsz, int pktmax, struct sk_buff_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	int mhsz = msg_hdr_sz(mhdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	struct tipc_msg pkthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 	int msz = mhsz + dsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 	int pktrem = pktmax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	int drem = dsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	int pktno = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	char *pktpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	int pktsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	msg_set_size(mhdr, msz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 	/* No fragmentation needed? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	if (likely(msz <= pktmax)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 		skb = tipc_buf_acquire(msz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 		/* Fall back to smaller MTU if node local message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 		if (unlikely(!skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 			if (pktmax != MAX_MSG_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 			rc = tipc_msg_build(mhdr, m, offset, dsz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 					    one_page_mtu, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 			if (rc != dsz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 				return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 			if (tipc_msg_assemble(list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 				return dsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 		skb_orphan(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 		__skb_queue_tail(list, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 		skb_copy_to_linear_data(skb, mhdr, mhsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 		pktpos = skb->data + mhsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 			return dsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 		rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	/* Prepare reusable fragment header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	msg_set_size(&pkthdr, pktmax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	msg_set_fragm_no(&pkthdr, pktno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	msg_set_importance(&pkthdr, msg_importance(mhdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	/* Prepare first fragment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	skb_orphan(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 	__skb_queue_tail(list, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	pktpos = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	pktpos += INT_H_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	pktrem -= INT_H_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	pktpos += mhsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 	pktrem -= mhsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 		if (drem < pktrem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 			pktrem = drem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 			rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 		drem -= pktrem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 		if (!drem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 		/* Prepare new fragment: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 		if (drem < (pktmax - INT_H_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 			pktsz = drem + INT_H_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 			pktsz = pktmax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 			rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 		skb_orphan(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 		__skb_queue_tail(list, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 		msg_set_type(&pkthdr, FRAGMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 		msg_set_size(&pkthdr, pktsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 		msg_set_fragm_no(&pkthdr, ++pktno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 		pktpos = skb->data + INT_H_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 		pktrem = pktsz - INT_H_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	} while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	return dsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	__skb_queue_purge(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	__skb_queue_head_init(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)  * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)  * @bskb: the bundle buffer to append to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)  * @msg: message to be appended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)  * @max: max allowable size for the bundle buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)  * Returns "true" if bundling has been performed, otherwise "false"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 			    u32 max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	struct tipc_msg *bmsg = buf_msg(bskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	u32 msz, bsz, offset, pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	msz = msg_size(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	bsz = msg_size(bmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	offset = align(bsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	pad = offset - bsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	if (unlikely(max < (offset + msz)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	skb_put(bskb, pad + msz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	msg_set_size(bmsg, offset + msz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)  * tipc_msg_try_bundle - Try to bundle a new message to the last one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)  * @tskb: the last/target message to which the new one will be appended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)  * @skb: the new message skb pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)  * @mss: max message size (header inclusive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)  * @dnode: destination node for the message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)  * @new_bundle: if this call made a new bundle or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)  * Return: "true" if the new message skb is potential for bundling this time or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)  * later, in the case a bundling has been done this time, the skb is consumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)  * (the skb pointer = NULL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)  * Otherwise, "false" if the skb cannot be bundled at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 			 u32 dnode, bool *new_bundle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	struct tipc_msg *msg, *inner, *outer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	u32 tsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	/* First, check if the new buffer is suitable for bundling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	msg = buf_msg(*skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	if (msg_user(msg) == MSG_FRAGMENTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	if (msg_user(msg) == TUNNEL_PROTOCOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	if (msg_user(msg) == BCAST_PROTOCOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	if (mss <= INT_H_SIZE + msg_size(msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	/* Ok, but the last/target buffer can be empty? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	if (unlikely(!tskb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	/* Is it a bundle already? Try to bundle the new message to it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 		*new_bundle = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 		goto bundle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	/* Make a new bundle of the two messages if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	tsz = msg_size(buf_msg(tskb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 				      GFP_ATOMIC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	inner = buf_msg(tskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	skb_push(tskb, INT_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	outer = buf_msg(tskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 		      dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	msg_set_importance(outer, msg_importance(inner));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	msg_set_size(outer, INT_H_SIZE + tsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	msg_set_msgcnt(outer, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	*new_bundle = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) bundle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		consume_skb(*skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 		*skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)  *  tipc_msg_extract(): extract bundled inner packet from buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)  *  @skb: buffer to be extracted from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)  *  @iskb: extracted inner buffer, to be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)  *  @pos: position in outer message of msg to be extracted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)  *        Returns position of next msg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)  *  Consumes outer buffer when last packet extracted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)  *  Returns true when there is an extracted buffer, otherwise false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	struct tipc_msg *hdr, *ihdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	int imsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	*iskb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	if (unlikely(skb_linearize(skb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 		goto none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 		goto none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	imsz = msg_size(ihdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	if ((*pos + imsz) > msg_data_sz(hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 		goto none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	if (!*iskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 		goto none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	skb_copy_to_linear_data(*iskb, ihdr, imsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	if (unlikely(!tipc_msg_validate(iskb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 		goto none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	*pos += align(imsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) none:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	kfree_skb(*iskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	*iskb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)  * tipc_msg_reverse(): swap source and destination addresses and add error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)  * @own_node: originating node id for reversed message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)  * @skb:  buffer containing message to be reversed; will be consumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)  * @err:  error code to be set in message, if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)  * Replaces consumed buffer with new one when successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)  * Returns true if success, otherwise false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	struct sk_buff *_skb = *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	struct tipc_msg *_hdr, *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	int hlen, dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	if (skb_linearize(_skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	_hdr = buf_msg(_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	hlen = msg_hdr_sz(_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	if (msg_dest_droppable(_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	if (msg_errcode(_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	/* Never return SHORT header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	if (hlen == SHORT_H_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 		hlen = BASIC_H_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	/* Don't return data along with SYN+, - sender has a clone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 		dlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	/* Allocate new buffer to return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 	if (!*skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	/* Build reverse header in new buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	hdr = buf_msg(*skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	msg_set_hdr_sz(hdr, hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	msg_set_errcode(hdr, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	msg_set_non_seq(hdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	msg_set_origport(hdr, msg_destport(_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	msg_set_destport(hdr, msg_origport(_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	msg_set_destnode(hdr, msg_prevnode(_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	msg_set_prevnode(hdr, own_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	msg_set_orignode(hdr, own_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	msg_set_size(hdr, hlen + dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	skb_orphan(_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	kfree_skb(_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	kfree_skb(_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	*skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 	struct sk_buff *skb, *_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	skb_queue_walk(msg, skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 		_skb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 		if (!_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 			__skb_queue_purge(cpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 			pr_err_ratelimited("Failed to clone buffer chain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 		__skb_queue_tail(cpy, _skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)  * tipc_msg_lookup_dest(): try to find new destination for named message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)  * @skb: the buffer containing the message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)  * @err: error code to be used by caller if lookup fails
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)  * Does not consume buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)  * Returns true if a destination is found, false otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	struct tipc_msg *msg = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	u32 dport, dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	u32 onode = tipc_own_addr(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	if (!msg_isdata(msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	if (!msg_named(msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	if (msg_errcode(msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	*err = TIPC_ERR_NO_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 	if (skb_linearize(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 	msg = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	if (msg_reroute_cnt(msg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	dnode = tipc_scope2node(net, msg_lookup_scope(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	dport = tipc_nametbl_translate(net, msg_nametype(msg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 				       msg_nameinst(msg), &dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	if (!dport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	msg_incr_reroute_cnt(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	if (dnode != onode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 		msg_set_prevnode(msg, onode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	msg_set_destnode(msg, dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 	msg_set_destport(msg, dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	*err = TIPC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) /* tipc_msg_assemble() - assemble chain of fragments into one message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) bool tipc_msg_assemble(struct sk_buff_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 	struct sk_buff *skb, *tmp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	if (skb_queue_len(list) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	while ((skb = __skb_dequeue(list))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 		skb->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 		if (tipc_buf_append(&tmp, &skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 			__skb_queue_tail(list, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 		if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	__skb_queue_purge(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 	__skb_queue_head_init(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	pr_warn("Failed do assemble buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /* tipc_msg_reassemble() - clone a buffer chain of fragments and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)  *                         reassemble the clones into one message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	struct sk_buff *skb, *_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	struct sk_buff *frag = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	struct sk_buff *head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 	int hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 	/* Copy header if single buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	if (skb_queue_len(list) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 		skb = skb_peek(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 		if (!_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 		__skb_queue_tail(rcvq, _skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	/* Clone all fragments and reassemble */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	skb_queue_walk(list, skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 		frag = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 		if (!frag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 		frag->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 		if (tipc_buf_append(&head, &frag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 		if (!head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	__skb_queue_tail(rcvq, frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	pr_warn("Failed do clone local mcast rcv buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	kfree_skb(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 			struct sk_buff_head *cpy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	struct sk_buff *skb, *_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	skb_queue_walk(msg, skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 		_skb = pskb_copy(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 		if (!_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 			__skb_queue_purge(cpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 		msg_set_destnode(buf_msg(_skb), dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 		__skb_queue_tail(cpy, _skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)  * @list: list to be appended to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)  * @seqno: sequence number of buffer to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)  * @skb: buffer to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 			     struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 	struct sk_buff *_skb, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 		__skb_queue_head(list, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 		__skb_queue_tail(list, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 	skb_queue_walk_safe(list, _skb, tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 		if (more(seqno, buf_seqno(_skb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 		if (seqno == buf_seqno(_skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 		__skb_queue_before(list, _skb, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 		     struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 		__skb_queue_tail(xmitq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }