Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  * net/tipc/socket.c: TIPC socket API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * modification, are permitted provided that the following conditions are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * 1. Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *    notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * 2. Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *    notice, this list of conditions and the following disclaimer in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *    documentation and/or other materials provided with the distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * 3. Neither the names of the copyright holders nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *    contributors may be used to endorse or promote products derived from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  *    this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * Alternatively, this software may be distributed under the terms of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * GNU General Public License ("GPL") version 2 as published by the Free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  * Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  * POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <linux/rhashtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include "core.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include "name_table.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include "node.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include "link.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include "name_distr.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include "socket.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include "bcast.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include "netlink.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include "group.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #define NAGLE_START_INIT	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #define NAGLE_START_MAX		1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #define CONN_TIMEOUT_DEFAULT    8000    /* default connect timeout = 8s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #define TIPC_MAX_PORT		0xffffffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #define TIPC_MIN_PORT		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	TIPC_LISTEN = TCP_LISTEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 	TIPC_ESTABLISHED = TCP_ESTABLISHED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	TIPC_OPEN = TCP_CLOSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	TIPC_CONNECTING = TCP_SYN_SENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) struct sockaddr_pair {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	struct sockaddr_tipc sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	struct sockaddr_tipc member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73)  * struct tipc_sock - TIPC socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74)  * @sk: socket - interacts with 'port' and with user via the socket API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75)  * @conn_type: TIPC type used when connection was established
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76)  * @conn_instance: TIPC instance used when connection was established
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77)  * @published: non-zero if port has one or more associated names
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78)  * @max_pkt: maximum packet size "hint" used when building messages sent by port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79)  * @maxnagle: maximum size of msg which can be subject to nagle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80)  * @portid: unique port identity in TIPC socket hash table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81)  * @phdr: preformatted message header used when sending messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82)  * #cong_links: list of congested links
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83)  * @publications: list of publications for port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84)  * @blocking_link: address of the congested link we are currently sleeping on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85)  * @pub_count: total # of publications port has made during its lifetime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86)  * @conn_timeout: the time we can wait for an unresponded setup request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87)  * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88)  * @cong_link_cnt: number of congested links
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89)  * @snt_unacked: # messages sent by socket, and not yet acked by peer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90)  * @rcv_unacked: # messages read by user, but not yet acked back to peer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91)  * @peer: 'connected' peer for dgram/rdm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92)  * @node: hash table node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93)  * @mc_method: cookie for use between socket and broadcast layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94)  * @rcu: rcu struct for tipc_sock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) struct tipc_sock {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 	struct sock sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	u32 conn_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	u32 conn_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	int published;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	u32 max_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	u32 maxnagle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	u32 portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	struct tipc_msg phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	struct list_head cong_links;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	struct list_head publications;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	u32 pub_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	atomic_t dupl_rcvcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	u16 conn_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	bool probe_unacked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	u16 cong_link_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	u16 snt_unacked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	u16 snd_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	u16 peer_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	u16 rcv_unacked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	u16 rcv_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	struct sockaddr_tipc peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	struct rhash_head node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	struct tipc_mc_method mc_method;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	struct rcu_head rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	struct tipc_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 	u32 oneway;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	u32 nagle_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 	u16 snd_backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	u16 msg_acc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	u16 pkt_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 	bool expect_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	bool nodelay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	bool group_is_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) static void tipc_data_ready(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) static void tipc_write_space(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) static void tipc_sock_destruct(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) static int tipc_release(struct socket *sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 		       bool kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) static void tipc_sk_timeout(struct timer_list *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 			   struct tipc_name_seq const *seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 			    struct tipc_name_seq const *seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) static int tipc_sk_leave(struct tipc_sock *tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) static int tipc_sk_insert(struct tipc_sock *tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) static void tipc_sk_remove(struct tipc_sock *tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) static const struct proto_ops packet_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) static const struct proto_ops stream_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) static const struct proto_ops msg_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) static struct proto tipc_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) static const struct rhashtable_params tsk_rht_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) static u32 tsk_own_node(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 	return msg_prevnode(&tsk->phdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) static u32 tsk_peer_node(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	return msg_destnode(&tsk->phdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) static u32 tsk_peer_port(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	return msg_destport(&tsk->phdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) static  bool tsk_unreliable(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	return msg_src_droppable(&tsk->phdr) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) static bool tsk_unreturnable(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	return msg_dest_droppable(&tsk->phdr) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) static int tsk_importance(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	return msg_importance(&tsk->phdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) static struct tipc_sock *tipc_sk(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	return container_of(sk, struct tipc_sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) int tsk_set_importance(struct sock *sk, int imp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	if (imp > TIPC_CRITICAL_IMPORTANCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) static bool tsk_conn_cong(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	return tsk->snt_unacked > tsk->snd_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) static u16 tsk_blocks(int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	return ((len / FLOWCTL_BLK_SZ) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) /* tsk_blocks(): translate a buffer size in bytes to number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223)  * advertisable blocks, taking into account the ratio truesize(len)/len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224)  * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) static u16 tsk_adv_blocks(int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	return len / FLOWCTL_BLK_SZ / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) /* tsk_inc(): increment counter for sent or received data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232)  * - If block based flow control is not supported by peer we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233)  *   fall back to message based ditto, incrementing the counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 		return ((msglen / FLOWCTL_BLK_SZ) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) /* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) static void tsk_set_nagle(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	tsk->maxnagle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	if (sk->sk_type != SOCK_STREAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	if (tsk->nodelay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	if (!(tsk->peer_caps & TIPC_NAGLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	/* Limit node local buffer size to avoid receive queue overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	if (tsk->max_pkt == MAX_MSG_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 		tsk->maxnagle = 1500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 		tsk->maxnagle = tsk->max_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263)  * tsk_advance_rx_queue - discard first buffer in socket receive queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265)  * Caller must hold socket lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) static void tsk_advance_rx_queue(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) /* tipc_sk_respond() : send response message back to sender
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	u32 selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	u32 dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	u32 onode = tipc_own_addr(sock_net(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	if (!tipc_msg_reverse(onode, &skb, err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	dnode = msg_destnode(buf_msg(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	selector = msg_origport(buf_msg(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291)  * tsk_rej_rx_queue - reject all buffers in socket receive queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293)  * Caller must hold socket lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) static void tsk_rej_rx_queue(struct sock *sk, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 		tipc_sk_respond(sk, skb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) static bool tipc_sk_connected(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	return sk->sk_state == TIPC_ESTABLISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) /* tipc_sk_type_connectionless - check if the socket is datagram socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311)  * Returns true if connection less, false otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) static bool tipc_sk_type_connectionless(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) /* tsk_peer_msg - verify if message was sent by connected port's peer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320)  * Handles cases where the node's network address has changed from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321)  * the default of <0.0.0> to its configured setting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	u32 self = tipc_own_addr(sock_net(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	u32 peer_port = tsk_peer_port(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	u32 orig_node, peer_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	if (unlikely(!tipc_sk_connected(sk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 	if (unlikely(msg_origport(msg) != peer_port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	orig_node = msg_orignode(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	peer_node = tsk_peer_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	if (likely(orig_node == peer_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	if (!orig_node && peer_node == self)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	if (!peer_node && orig_node == self)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) /* tipc_set_sk_state - set the sk_state of the socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354)  * Caller must hold socket lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) static int tipc_set_sk_state(struct sock *sk, int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	int oldsk_state = sk->sk_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	int res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	case TIPC_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 		res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	case TIPC_LISTEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	case TIPC_CONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 		if (oldsk_state == TIPC_OPEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 			res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	case TIPC_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 		if (oldsk_state == TIPC_CONNECTING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 		    oldsk_state == TIPC_OPEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 			res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	case TIPC_DISCONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 		if (oldsk_state == TIPC_CONNECTING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 		    oldsk_state == TIPC_ESTABLISHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 			res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 		sk->sk_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) static int tipc_sk_sock_err(struct socket *sock, long *timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	int err = sock_error(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	int typ = sock->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		if (sk->sk_state == TIPC_DISCONNECTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 			return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 		else if (!tipc_sk_connected(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 			return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	if (!*timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 		return sock_intr_errno(*timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) #define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) ({                                                                             \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	DEFINE_WAIT_FUNC(wait_, woken_wake_function);                          \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	struct sock *sk_;						       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	int rc_;							       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 									       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	while ((rc_ = !(condition_))) {					       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */            \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 		smp_rmb();                                                     \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 		sk_ = (sock_)->sk;					       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 		if (rc_)						       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 			break;						       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 		add_wait_queue(sk_sleep(sk_), &wait_);                         \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 		release_sock(sk_);					       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		sched_annotate_sleep();				               \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		lock_sock(sk_);						       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	}								       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	rc_;								       \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436)  * tipc_sk_create - create a TIPC socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437)  * @net: network namespace (must be default network)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438)  * @sock: pre-allocated socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439)  * @protocol: protocol indicator (must be 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440)  * @kern: caused by kernel or by userspace?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442)  * This routine creates additional data structures used by the TIPC socket,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443)  * initializes them, and links them together.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) static int tipc_sk_create(struct net *net, struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 			  int protocol, int kern)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	const struct proto_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	struct tipc_sock *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	struct tipc_msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	/* Validate arguments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	if (unlikely(protocol != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		return -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	switch (sock->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	case SOCK_STREAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		ops = &stream_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	case SOCK_SEQPACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		ops = &packet_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	case SOCK_DGRAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	case SOCK_RDM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		ops = &msg_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		return -EPROTOTYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	/* Allocate socket's protocol area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	if (sk == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	tsk->max_pkt = MAX_PKT_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	tsk->maxnagle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	tsk->nagle_start = NAGLE_START_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	INIT_LIST_HEAD(&tsk->publications);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	INIT_LIST_HEAD(&tsk->cong_links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	msg = &tsk->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	/* Finish initializing socket data structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	sock->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	sock_init_data(sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	tipc_set_sk_state(sk, TIPC_OPEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	if (tipc_sk_insert(tsk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		pr_warn("Socket create failed; port number exhausted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	/* Ensure tsk is visible before we read own_addr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	msg_set_origport(msg, tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	sk->sk_shutdown = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	sk->sk_data_ready = tipc_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	sk->sk_write_space = tipc_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	sk->sk_destruct = tipc_sock_destruct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	tsk->group_is_open = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	atomic_set(&tsk->dupl_rcvcnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	/* Start out with safe limits until we receive an advertised window */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	tsk->rcv_win = tsk->snd_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	if (tipc_sk_type_connectionless(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		tsk_set_unreturnable(tsk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		if (sock->type == SOCK_DGRAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 			tsk_set_unreliable(tsk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	__skb_queue_head_init(&tsk->mc_method.deferredq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) static void tipc_sk_callback(struct rcu_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	sock_put(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) /* Caller should hold socket lock for the socket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) static void __tipc_shutdown(struct socket *sock, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	u32 dnode = tsk_peer_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 					    !tsk_conn_cong(tsk)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	/* Push out delayed messages if in Nagle mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	tipc_sk_push_backlog(tsk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	/* Remove pending SYN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	__skb_queue_purge(&sk->sk_write_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	/* Remove partially received buffer if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	skb = skb_peek(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	if (skb && TIPC_SKB_CB(skb)->bytes_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		__skb_unlink(skb, &sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	/* Reject all unreceived messages if connectionless */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	if (tipc_sk_type_connectionless(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		tsk_rej_rx_queue(sk, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	switch (sk->sk_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	case TIPC_CONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	case TIPC_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 		tipc_node_remove_conn(net, dnode, tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		/* Send a FIN+/- to its peer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		skb = __skb_dequeue(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 			__skb_queue_purge(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 			tipc_sk_respond(sk, skb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 				      tsk_own_node(tsk), tsk_peer_port(tsk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 				      tsk->portid, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 		if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	case TIPC_LISTEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		/* Reject all SYN messages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		tsk_rej_rx_queue(sk, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		__skb_queue_purge(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597)  * tipc_release - destroy a TIPC socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598)  * @sock: socket to destroy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600)  * This routine cleans up any messages that are still queued on the socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601)  * For DGRAM and RDM socket types, all queued messages are rejected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602)  * For SEQPACKET and STREAM socket types, the first message is rejected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603)  * and any others are discarded.  (If the first message on a STREAM socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604)  * is partially-read, it is discarded and the next one is rejected instead.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606)  * NOTE: Rejected messages are not necessarily returned to the sender!  They
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607)  * are returned or discarded according to the "destination droppable" setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608)  * specified for the message by the sender.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) static int tipc_release(struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	struct tipc_sock *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	 * Exit if socket isn't fully initialized (occurs when a failed accept()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	 * releases a pre-allocated child socket that was never used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	if (sk == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	sk->sk_shutdown = SHUTDOWN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	tipc_sk_leave(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	tipc_sk_withdraw(tsk, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	__skb_queue_purge(&tsk->mc_method.deferredq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	sk_stop_timer(sk, &sk->sk_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	tipc_sk_remove(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	sock_orphan(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	/* Reject any messages that accumulated in backlog queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	tipc_dest_list_purge(&tsk->cong_links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	tsk->cong_link_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	call_rcu(&tsk->rcu, tipc_sk_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	sock->sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648)  * tipc_bind - associate or disassocate TIPC name(s) with a socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650)  * @uaddr: socket address describing name(s) and desired operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651)  * @uaddr_len: size of socket address data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653)  * Name and name sequence binding is indicated using a positive scope value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654)  * a negative scope value unbinds the specified name.  Specifying no name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655)  * (i.e. a socket address length of 0) unbinds all names from the socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659)  * NOTE: This routine doesn't need to take the socket lock since it doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660)  *       access any non-constant socket information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		     int uaddr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	int res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	if (unlikely(!uaddr_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		res = tipc_sk_withdraw(tsk, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	if (tsk->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		res = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	if (addr->family != AF_TIPC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		res = -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	if (addr->addrtype == TIPC_ADDR_NAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		res = -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		res = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	res = (addr->scope >= 0) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711)  * tipc_getname - get port ID of socket or peer socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713)  * @uaddr: area for returned socket address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714)  * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718)  * NOTE: This routine doesn't need to take the socket lock since it only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719)  *       accesses socket information that is unchanging (or which changes in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720)  *       a completely predictable manner).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			int peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	memset(addr, 0, sizeof(*addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (peer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		if ((!tipc_sk_connected(sk)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 			return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		addr->addr.id.ref = tsk_peer_port(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		addr->addr.id.node = tsk_peer_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		addr->addr.id.ref = tsk->portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		addr->addr.id.node = tipc_own_addr(sock_net(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	addr->addrtype = TIPC_ADDR_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	addr->family = AF_TIPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	addr->scope = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	addr->addr.name.domain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	return sizeof(*addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750)  * tipc_poll - read and possibly block on pollmask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751)  * @file: file structure associated with the socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752)  * @sock: socket for which to calculate the poll bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  * @wait: ???
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  * Returns pollmask value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * COMMENTARY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  * It appears that the usual socket locking mechanisms are not useful here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  * since the pollmask info is potentially out-of-date the moment this routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  * exits.  TCP and other protocols seem to rely on higher level poll routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  * to handle any preventable race conditions, so TIPC will do the same ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763)  * IMPORTANT: The fact that a read or write operation is indicated does NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764)  * imply that the operation will succeed, merely that it should be performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765)  * and will not block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) static __poll_t tipc_poll(struct file *file, struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			      poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	__poll_t revents = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	sock_poll_wait(file, sock, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (sk->sk_shutdown & RCV_SHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	if (sk->sk_shutdown == SHUTDOWN_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		revents |= EPOLLHUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	switch (sk->sk_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	case TIPC_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			revents |= EPOLLOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	case TIPC_LISTEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	case TIPC_CONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			revents |= EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	case TIPC_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		if (tsk->group_is_open && !tsk->cong_link_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			revents |= EPOLLOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		if (!tipc_sk_type_connectionless(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		revents |= EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	case TIPC_DISCONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	return revents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  * tipc_sendmcast - send multicast message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  * @seq: destination address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  * @msg: message to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  * @dlen: length of data to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  * @timeout: timeout to wait for wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  * Called from function tipc_sendmsg(), which has done all sanity checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  * Returns the number of bytes sent on success, or errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			  struct msghdr *msg, size_t dlen, long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	struct tipc_msg *hdr = &tsk->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	int mtu = tipc_bcast_get_mtu(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	struct tipc_mc_method *method = &tsk->mc_method;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	struct sk_buff_head pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	struct tipc_nlist dsts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if (tsk->group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	/* Block or return if any destination link is congested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	/* Lookup destination nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	tipc_nlist_init(&dsts, tipc_own_addr(net));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 				      seq->upper, &dsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	if (!dsts.local && !dsts.remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	/* Build message header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	msg_set_type(hdr, TIPC_MCAST_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	msg_set_destport(hdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	msg_set_destnode(hdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	msg_set_nametype(hdr, seq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	msg_set_namelower(hdr, seq->lower);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	msg_set_nameupper(hdr, seq->upper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	/* Build message as chain of buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	__skb_queue_head_init(&pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	/* Send message if build was successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (unlikely(rc == dlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 					TIPC_DUMP_SK_SNDQ, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 				     &tsk->cong_link_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	tipc_nlist_purge(&dsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	return rc ? rc : dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @tsk: tipc socket to be used
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 *
 * Returns the number of bytes sent on success, or errno
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			       struct msghdr *m, struct tipc_member *mb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			       u32 dnode, u32 dport, int dlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	struct tipc_mc_method *method = &tsk->mc_method;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	struct tipc_msg *hdr = &tsk->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	struct sk_buff_head pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	int mtu, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	/* Complete message header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	msg_set_destport(hdr, dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	msg_set_destnode(hdr, dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	/* Build message as chain of buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	__skb_queue_head_init(&pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	if (unlikely(rc != dlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	/* Send message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	if (unlikely(rc == -ELINKCONG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		tipc_dest_push(&tsk->cong_links, dnode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		tsk->cong_link_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	/* Update send window */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	tipc_group_update_member(mb, blks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	/* A broadcast sent within next EXPIRE period must follow same path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	method->rcast = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	method->mandatory = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	return dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  * tipc_send_group_unicast - send message to a member in the group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  * @m: message to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  * @dlen: total length of message data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  * @timeout: timeout to wait for wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * Called from function tipc_sendmsg(), which has done all sanity checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * Returns the number of bytes sent on success, or errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 				   int dlen, long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	struct tipc_member *mb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	u32 node, port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	node = dest->addr.id.node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	port = dest->addr.id.ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	if (!port && !node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	/* Block or return if destination link or member is congested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	rc = tipc_wait_for_cond(sock, &timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 				!tipc_dest_find(&tsk->cong_links, node, 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 				tsk->group &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 				!tipc_group_cong(tsk->group, node, port, blks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 						 &mb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	if (unlikely(!mb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	return rc ? rc : dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  * tipc_send_group_anycast - send message to any member with given identity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971)  * @m: message to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  * @dlen: total length of message data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  * @timeout: timeout to wait for wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  * Called from function tipc_sendmsg(), which has done all sanity checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  * Returns the number of bytes sent on success, or errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	u32 type, inst, scope;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);

	/* Destination is selected by name: instance comes from the caller,
	 * type and lookup scope from the socket's prepared message header
	 */
	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);

	/* Bounded retry (at most three lookups): group membership may have
	 * changed while we were blocked in tipc_wait_for_cond() below
	 */
	while (++lookups < 4) {
		exclude = tipc_group_exclude(tsk->group);

		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
						 &dstcnt, exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(tsk->group, node, port, blks,
					       &mbr);
			if (!cong)
				break;
			/* Seeing the first candidate again means we have
			 * cycled through all of them: stop and wait below
			 */
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		/* Both the member and its link are clear: send now */
		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					tsk->group &&
					!tipc_group_cong(tsk->group, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	/* Loop exhausted without finding a reachable member */
	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)  * tipc_send_group_bcast - send message to all members in communication group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  * @m: message to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)  * @dlen: total length of message data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  * @timeout: timeout to wait for wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  * Called from function tipc_sendmsg(), which has done all sanity checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  * Returns the number of bytes sent on success, or errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 				 int dlen, long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	struct tipc_nlist *dsts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	struct tipc_mc_method *method = &tsk->mc_method;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	bool ack = method->mandatory && method->rcast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	struct tipc_msg *hdr = &tsk->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	int mtu = tipc_bcast_get_mtu(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	struct sk_buff_head pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	int rc = -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	/* Block or return if any destination link or member is congested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	rc = tipc_wait_for_cond(sock, &timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 				!tsk->cong_link_cnt && tsk->group &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 				!tipc_group_bc_cong(tsk->group, blks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	dsts = tipc_group_dests(tsk->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	if (!dsts->local && !dsts->remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	/* Complete message header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (dest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		msg_set_nameinst(hdr, dest->addr.name.name.instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		msg_set_nameinst(hdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	msg_set_destport(hdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	msg_set_destnode(hdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	/* Avoid getting stuck with repeated forced replicasts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	msg_set_grp_bc_ack_req(hdr, ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	/* Build message as chain of buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	__skb_queue_head_init(&pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if (unlikely(rc != dlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	/* Send message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	/* Update broadcast sequence number and send windows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	tipc_group_update_bc_members(tsk->group, blks, ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	/* Broadcast link is now free to choose method for next broadcast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	method->mandatory = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	method->expires = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	return dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)  * tipc_send_group_mcast - send message to all members with given identity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)  * @m: message to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  * @dlen: total length of message data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)  * @timeout: timeout to wait for wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  * Called from function tipc_sendmsg(), which has done all sanity checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  * Returns the number of bytes sent on success, or errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				 int dlen, long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	struct tipc_group *grp = tsk->group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	struct tipc_msg *hdr = &tsk->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	u32 type, inst, scope, exclude;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	struct list_head dsts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	u32 dstcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	INIT_LIST_HEAD(&dsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	type = msg_nametype(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	inst = dest->addr.name.name.instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	scope = msg_lookup_scope(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	exclude = tipc_group_exclude(grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 				 &dstcnt, exclude, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	if (dstcnt == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		return tipc_send_group_unicast(sock, m, dlen, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	tipc_dest_list_purge(&dsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	return tipc_send_group_bcast(sock, m, dlen, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	u32 type, lower, upper, scope;
	struct sk_buff *skb, *_skb;
	u32 portid, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	int user, mtyp, hlen;
	bool exact;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	/* tipc_skb_peek() takes a reference on the peeked skb; each such
	 * reference is balanced by a kfree_skb() before the next iteration
	 */
	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		onode = msg_orignode(hdr);
		type = msg_nametype(hdr);

		/* Group unicasts and group protocol messages need no
		 * destination lookup; move them to inputq directly. The
		 * arrvq-head check guards against a parallel thread having
		 * already consumed this buffer.
		 */
		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			/* Drop the reference taken by tipc_skb_peek() */
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			lower = 0;
			upper = ~0;
			scope = msg_lookup_scope(hdr);
			exact = true;
		} else {
			/* TIPC_NODE_SCOPE means "any scope" in this context */
			if (onode == self)
				scope = TIPC_NODE_SCOPE;
			else
				scope = TIPC_CLUSTER_SCOPE;
			exact = false;
			lower = msg_namelower(hdr);
			upper = msg_nameupper(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_mc_lookup(net, type, lower, upper,
				       scope, exact, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			/* Allocation failure: this destination is skipped */
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			/* Decrease the skb's refcnt as increasing in the
			 * function tipc_skb_peek
			 */
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		/* If another thread consumed the skb first, tmpq still holds
		 * our clones; purge them, then drop our peek reference
		 */
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	/* Deliver everything queued on inputq to the destination sockets */
	tipc_sk_rcv(net, inputq);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) /* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)  *                         when socket is in Nagle mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	struct sk_buff *skb = skb_peek_tail(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	struct net *net = sock_net(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	u32 dnode = tsk_peer_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	if (nagle_ack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		tsk->pkt_cnt += skb_queue_len(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			tsk->oneway = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			if (tsk->nagle_start < NAGLE_START_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 				tsk->nagle_start *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			tsk->expect_ack = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 				 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 				 tsk->nagle_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 			tsk->nagle_start = NAGLE_START_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 			if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 				msg_set_ack_required(buf_msg(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 				tsk->expect_ack = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 				tsk->expect_ack = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		tsk->msg_acc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		tsk->pkt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	if (!skb || tsk->cong_link_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	/* Do not send SYN again after congestion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (msg_is_syn(buf_msg(skb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	if (tsk->msg_acc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		tsk->pkt_cnt += skb_queue_len(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	tsk->snt_unacked += tsk->snd_backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	tsk->snd_backlog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	if (rc == -ELINKCONG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		tsk->cong_link_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)  * @tsk: receiving socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)  * @skb: pointer to message buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 				   struct sk_buff_head *inputq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 				   struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	struct tipc_msg *hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	u32 onode = tsk_own_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	int mtyp = msg_type(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	bool was_cong;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	/* Ignore if connection cannot be validated: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	if (!tsk_peer_msg(tsk, hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (unlikely(msg_errcode(hdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 				      tsk_peer_port(tsk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		/* State change is ignored if socket already awake,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		 * - convert msg to abort msg and add to inqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		msg_set_type(hdr, TIPC_CONN_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		msg_set_size(hdr, BASIC_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		__skb_queue_tail(inputq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	tsk->probe_unacked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	if (mtyp == CONN_PROBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		msg_set_type(hdr, CONN_PROBE_REPLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 			__skb_queue_tail(xmitq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	} else if (mtyp == CONN_ACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		was_cong = tsk_conn_cong(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		tsk->snt_unacked -= msg_conn_ack(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			tsk->snd_win = msg_adv_win(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		if (was_cong && !tsk_conn_cong(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			sk->sk_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	} else if (mtyp != CONN_PROBE_REPLY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		pr_warn("Received unknown CONN_PROTO msg\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)  * tipc_sendmsg - send message in connectionless manner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  * @m: message to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  * @dsz: amount of user data to be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  * Message must have an destination specified explicitly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  * Used for SOCK_RDM and SOCK_DGRAM messages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)  * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  * Returns the number of bytes sent on success, or errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static int tipc_sendmsg(struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			struct msghdr *m, size_t dsz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	ret = __tipc_sendmsg(sock, m, dsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	struct list_head *clinks = &tsk->cong_links;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	bool syn = !tipc_sk_type_connectionless(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	struct tipc_group *grp = tsk->group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	struct tipc_msg *hdr = &tsk->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	struct tipc_name_seq *seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	struct sk_buff_head pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	u32 dport = 0, dnode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	u32 type = 0, inst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	int mtu, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	if (likely(dest)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		if (unlikely(m->msg_namelen < sizeof(*dest)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		if (unlikely(dest->family != AF_TIPC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (grp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		if (!dest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 			return tipc_send_group_bcast(sock, m, dlen, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		if (dest->addrtype == TIPC_ADDR_NAME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 			return tipc_send_group_anycast(sock, m, dlen, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		if (dest->addrtype == TIPC_ADDR_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 			return tipc_send_group_unicast(sock, m, dlen, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		if (dest->addrtype == TIPC_ADDR_MCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 			return tipc_send_group_mcast(sock, m, dlen, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	if (unlikely(!dest)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		dest = &tsk->peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		if (!syn && dest->family != AF_TIPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 			return -EDESTADDRREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	if (unlikely(syn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		if (sk->sk_state == TIPC_LISTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 			return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		if (sk->sk_state != TIPC_OPEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 			return -EISCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		if (tsk->published)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 			return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		if (dest->addrtype == TIPC_ADDR_NAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			tsk->conn_type = dest->addr.name.name.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			tsk->conn_instance = dest->addr.name.name.instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		msg_set_syn(hdr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	seq = &dest->addr.nameseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	if (dest->addrtype == TIPC_ADDR_MCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		return tipc_sendmcast(sock, seq, m, dlen, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	if (dest->addrtype == TIPC_ADDR_NAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		type = dest->addr.name.name.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		inst = dest->addr.name.name.instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		dnode = dest->addr.name.domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		dport = tipc_nametbl_translate(net, type, inst, &dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		if (unlikely(!dport && !dnode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	} else if (dest->addrtype == TIPC_ADDR_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		dnode = dest->addr.id.node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	/* Block or return if destination link is congested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	rc = tipc_wait_for_cond(sock, &timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 				!tipc_dest_find(clinks, dnode, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	if (dest->addrtype == TIPC_ADDR_NAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		msg_set_type(hdr, TIPC_NAMED_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		msg_set_nametype(hdr, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		msg_set_nameinst(hdr, inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		msg_set_destnode(hdr, dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		msg_set_destport(hdr, dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	} else { /* TIPC_ADDR_ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		msg_set_type(hdr, TIPC_DIRECT_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		msg_set_lookup_scope(hdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		msg_set_destnode(hdr, dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		msg_set_destport(hdr, dest->addr.id.ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	__skb_queue_head_init(&pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	mtu = tipc_node_get_mtu(net, dnode, tsk->portid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	if (unlikely(rc != dlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		__skb_queue_purge(&pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	if (unlikely(rc == -ELINKCONG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		tipc_dest_push(clinks, dnode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		tsk->cong_link_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (unlikely(syn && !rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		tipc_set_sk_state(sk, TIPC_CONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		if (dlen && timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			timeout = msecs_to_jiffies(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			tipc_wait_for_connect(sock, &timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	return rc ? rc : dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)  * tipc_sendstream - send stream-oriented data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)  * @m: data to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)  * @dsz: total length of data to be transmitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  * Used for SOCK_STREAM data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)  * Returns the number of bytes sent on success (or partial success),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)  * or errno if no data sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	ret = __tipc_sendstream(sock, m, dsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	struct sk_buff_head *txq = &sk->sk_write_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	struct tipc_msg *hdr = &tsk->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	u32 dnode = tsk_peer_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	int maxnagle = tsk->maxnagle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	int maxpkt = tsk->max_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	int send, sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	int blocks, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	if (unlikely(dlen > INT_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	/* Handle implicit connection setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		rc = __tipc_sendmsg(sock, m, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		if (dlen && dlen == rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		rc = tipc_wait_for_cond(sock, &timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 					(!tsk->cong_link_cnt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 					 !tsk_conn_cong(tsk) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 					 tipc_sk_connected(sk)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		blocks = tsk->snd_backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		    send <= maxnagle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 			rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 			if (unlikely(rc < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 			blocks += rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			tsk->msg_acc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 			if (blocks <= 64 && tsk->expect_ack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 				tsk->snd_backlog = blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 				sent += send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 			} else if (blocks > 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 				tsk->pkt_cnt += skb_queue_len(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 				skb = skb_peek_tail(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 				if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 					msg_set_ack_required(buf_msg(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 					tsk->expect_ack = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 					tsk->expect_ack = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 				tsk->msg_acc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 				tsk->pkt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 			rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 			if (unlikely(rc != send))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 			blocks += tsk_inc(tsk, send + MIN_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		trace_tipc_sk_sendstream(sk, skb_peek(txq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 					 TIPC_DUMP_SK_SNDQ, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		if (unlikely(rc == -ELINKCONG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 			tsk->cong_link_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 			rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		if (likely(!rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			tsk->snt_unacked += blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 			tsk->snd_backlog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 			sent += send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	} while (sent < dlen && !rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	return sent ? sent : rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)  * tipc_send_packet - send a connection-oriented message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)  * @m: message to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)  * @dsz: length of data to be transmitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  * Used for SOCK_SEQPACKET messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)  * Returns the number of bytes sent on success, or errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	if (dsz > TIPC_MAX_USER_MSG_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	return tipc_sendstream(sock, m, dsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) /* tipc_sk_finish_conn - complete the setup of a connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 				u32 peer_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	struct tipc_msg *msg = &tsk->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	msg_set_syn(msg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	msg_set_destnode(msg, peer_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	msg_set_destport(msg, peer_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	msg_set_type(msg, TIPC_CONN_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	msg_set_lookup_scope(msg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	msg_set_hdr_sz(msg, SHORT_H_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	tsk_set_nagle(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	__skb_queue_purge(&sk->sk_write_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	/* Fall back to message based flow control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	tsk->rcv_win = FLOWCTL_MSG_WIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	tsk->snd_win = FLOWCTL_MSG_WIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)  * tipc_sk_set_orig_addr - capture sender's address for received message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)  * @m: descriptor for message info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)  * @skb: received message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)  * Note: Address is not captured if not requested by receiver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	struct tipc_msg *hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	if (!srcaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	srcaddr->sock.family = AF_TIPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	srcaddr->sock.addrtype = TIPC_ADDR_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	srcaddr->sock.scope = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	srcaddr->sock.addr.id.ref = msg_origport(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	srcaddr->sock.addr.id.node = msg_orignode(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	srcaddr->sock.addr.name.domain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	m->msg_namelen = sizeof(struct sockaddr_tipc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	if (!msg_in_group(hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	/* Group message users may also want to know sending member's id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	srcaddr->member.family = AF_TIPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	srcaddr->member.addrtype = TIPC_ADDR_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	srcaddr->member.scope = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	srcaddr->member.addr.name.domain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	m->msg_namelen = sizeof(*srcaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)  * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)  * @m: descriptor for message info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)  * @skb: received message buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)  * @tsk: TIPC port associated with message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)  * Note: Ancillary data is not captured if not requested by receiver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)  * Returns 0 if successful, otherwise errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 				 struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	struct tipc_msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	u32 anc_data[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	u32 err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	u32 dest_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	int has_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	if (likely(m->msg_controllen == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	msg = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	/* Optionally capture errored message object(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	err = msg ? msg_errcode(msg) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		anc_data[0] = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		anc_data[1] = msg_data_sz(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		if (anc_data[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 			if (skb_linearize(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 			msg = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 				       msg_data(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 			if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 				return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	/* Optionally capture message destination object */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	switch (dest_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	case TIPC_NAMED_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		has_name = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		anc_data[0] = msg_nametype(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		anc_data[1] = msg_namelower(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		anc_data[2] = msg_namelower(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	case TIPC_MCAST_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		has_name = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		anc_data[0] = msg_nametype(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		anc_data[1] = msg_namelower(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		anc_data[2] = msg_nameupper(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	case TIPC_CONN_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		has_name = (tsk->conn_type != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		anc_data[0] = tsk->conn_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		anc_data[1] = tsk->conn_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		anc_data[2] = tsk->conn_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		has_name = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	if (has_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 			return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	struct tipc_msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	u32 peer_port = tsk_peer_port(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	u32 dnode = tsk_peer_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	if (!tipc_sk_connected(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 			      dnode, tsk_own_node(tsk), peer_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 			      tsk->portid, TIPC_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	msg = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	msg_set_conn_ack(msg, tsk->rcv_unacked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	tsk->rcv_unacked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	/* Adjust to and advertize the correct window limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		msg_set_adv_win(msg, tsk->rcv_win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) static void tipc_sk_send_ack(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	skb = tipc_sk_build_ack(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 			   msg_link_selector(buf_msg(skb)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	long timeo = *timeop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	int err = sock_error(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 			if (sk->sk_shutdown & RCV_SHUTDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 				err = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			add_wait_queue(sk_sleep(sk), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			sched_annotate_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			remove_wait_queue(sk_sleep(sk), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		if (!skb_queue_empty(&sk->sk_receive_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		if (!timeo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		err = sock_intr_errno(timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		err = sock_error(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	*timeop = timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)  * tipc_recvmsg - receive packet-oriented message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)  * @m: descriptor for message info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)  * @buflen: length of user buffer area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)  * @flags: receive flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)  * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)  * If the complete message doesn't fit in user area, truncate it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)  * Returns size of returned message data, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 			size_t buflen,	int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	bool connected = !tipc_sk_type_connectionless(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	int rc, err, hlen, dlen, copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	struct tipc_skb_cb *skb_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	struct sk_buff_head xmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	struct tipc_msg *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	bool grp_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	/* Catch invalid receive requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	if (unlikely(!buflen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		rc = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	/* Step rcv queue to first msg with data or error; wait if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		rc = tipc_wait_for_rcvmsg(sock, &timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		skb = skb_peek(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		skb_cb = TIPC_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		dlen = msg_data_sz(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		hlen = msg_hdr_sz(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		err = msg_errcode(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		grp_evt = msg_is_grp_evt(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		if (likely(dlen || err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		tsk_advance_rx_queue(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	} while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	/* Collect msg meta data, including error code and rejected data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	tipc_sk_set_orig_addr(m, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	rc = tipc_sk_anc_data_recv(m, skb, tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	/* Capture data if non-error msg, otherwise just set return value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	if (likely(!err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		int offset = skb_cb->bytes_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		copy = min_t(int, dlen - offset, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		if (unlikely(offset + copy < dlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 			if (flags & MSG_EOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 				if (!(flags & MSG_PEEK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 					skb_cb->bytes_read = offset + copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 				m->msg_flags |= MSG_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 				skb_cb->bytes_read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 			if (flags & MSG_EOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 				m->msg_flags |= MSG_EOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 			skb_cb->bytes_read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		copy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 			rc = -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	/* Mark message as group event if applicable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	if (unlikely(grp_evt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 			m->msg_flags |= MSG_EOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		m->msg_flags |= MSG_OOB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		copy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	/* Caption of data or error code/rejected data was successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	if (unlikely(flags & MSG_PEEK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	/* Send group flow control advertisement when applicable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		__skb_queue_head_init(&xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 					  msg_orignode(hdr), msg_origport(hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 					  &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		tipc_node_distr_xmit(sock_net(sk), &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	if (skb_cb->bytes_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	tsk_advance_rx_queue(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	if (likely(!connected))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	/* Send connection flow control advertisement when applicable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		tipc_sk_send_ack(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	return rc ? rc : copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)  * tipc_recvstream - receive stream-oriented data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)  * @m: descriptor for message info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)  * @buflen: total size of user buffer area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)  * @flags: receive flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)  * Used for SOCK_STREAM messages only.  If not enough data is available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)  * will optionally wait for more; never truncates data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)  * Returns size of returned message data, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) static int tipc_recvstream(struct socket *sock, struct msghdr *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			   size_t buflen, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	struct tipc_msg *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	struct tipc_skb_cb *skb_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	bool peek = flags & MSG_PEEK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	int offset, required, copy, copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	int hlen, dlen, err, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	/* Catch invalid receive attempts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	if (unlikely(!buflen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	if (unlikely(sk->sk_state == TIPC_OPEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		rc = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		/* Look at first msg in receive queue; wait if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		rc = tipc_wait_for_rcvmsg(sock, &timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		skb = skb_peek(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		skb_cb = TIPC_SKB_CB(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		dlen = msg_data_sz(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		hlen = msg_hdr_sz(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		err = msg_errcode(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		/* Discard any empty non-errored (SYN-) message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		if (unlikely(!dlen && !err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 			tsk_advance_rx_queue(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		/* Collect msg meta data, incl. error code and rejected data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		if (!copied) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 			tipc_sk_set_orig_addr(m, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 			rc = tipc_sk_anc_data_recv(m, skb, tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 			hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		/* Copy data if msg ok, otherwise return error/partial data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		if (likely(!err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 			offset = skb_cb->bytes_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 			copy = min_t(int, dlen - offset, buflen - copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 			if (unlikely(rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 			copied += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 			offset += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 			if (unlikely(offset < dlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 				if (!peek)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 					skb_cb->bytes_read = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 			rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 				rc = -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 			if (copied || rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		if (unlikely(peek))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		tsk_advance_rx_queue(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		/* Send connection flow control advertisement when applicable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 			tipc_sk_send_ack(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		/* Exit if all requested data or FIN/error received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		if (copied == buflen || err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	return copied ? copied : rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)  * tipc_write_space - wake up thread if port congestion is released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) static void tipc_write_space(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	struct socket_wq *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	wq = rcu_dereference(sk->sk_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	if (skwq_has_sleeper(wq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 						EPOLLWRNORM | EPOLLWRBAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)  * tipc_data_ready - wake up threads to indicate messages have been received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) static void tipc_data_ready(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	struct socket_wq *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	wq = rcu_dereference(sk->sk_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	if (skwq_has_sleeper(wq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 						EPOLLRDNORM | EPOLLRDBAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) static void tipc_sock_destruct(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	__skb_queue_purge(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) static void tipc_sk_proto_rcv(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 			      struct sk_buff_head *inputq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 			      struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	struct sk_buff *skb = __skb_dequeue(inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	struct tipc_msg *hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	struct tipc_group *grp = tsk->group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	bool wakeup = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	switch (msg_user(hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	case CONN_MANAGER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	case SOCK_WAKEUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		/* coupled with smp_rmb() in tipc_wait_for_cond() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		tsk->cong_link_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		wakeup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		tipc_sk_push_backlog(tsk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	case GROUP_PROTOCOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	case TOP_SRV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 				      hdr, inputq, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	if (wakeup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		sk->sk_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)  * tipc_sk_filter_connect - check incoming message for a connection-based socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)  * @tsk: TIPC socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)  * @skb: pointer to message buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)  * @xmitq: for Nagle ACK if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)  * Returns true if message should be added to receive queue, false otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 				   struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	struct tipc_msg *hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	bool con_msg = msg_connected(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	u32 pport = tsk_peer_port(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	u32 pnode = tsk_peer_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	u32 oport = msg_origport(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	u32 onode = msg_orignode(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	int err = msg_errcode(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	unsigned long delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	if (unlikely(msg_mcast(hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	tsk->oneway = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	switch (sk->sk_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	case TIPC_CONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		/* Setup ACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		if (likely(con_msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			tipc_sk_finish_conn(tsk, oport, onode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 			msg_set_importance(&tsk->phdr, msg_importance(hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 			/* ACK+ message with data is added to receive queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 			if (msg_data_sz(hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 			/* Empty ACK-, - wake up sleeping connect() and drop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 			msg_set_dest_droppable(hdr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		/* Ignore connectionless message if not from listening socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		if (oport != pport || onode != pnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		/* Rejected SYN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		if (err != TIPC_ERR_OVERLOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		/* Prepare for new setup attempt if we have a SYN clone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		if (skb_queue_empty(&sk->sk_write_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		get_random_bytes(&delay, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		delay %= (tsk->conn_timeout / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		delay = msecs_to_jiffies(delay + 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	case TIPC_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	case TIPC_DISCONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	case TIPC_LISTEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		/* Accept only SYN message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		if (!msg_is_syn(hdr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		    tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		if (!con_msg && !err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	case TIPC_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		if (!skb_queue_empty(&sk->sk_write_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 			tipc_sk_push_backlog(tsk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		/* Accept only connection-based messages sent by peer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 		if (likely(con_msg && !err && pport == oport &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 			   pnode == onode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 			if (msg_ack_required(hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 				struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 				skb = tipc_sk_build_ack(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 				if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 					msg_set_nagle_ack(buf_msg(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 					__skb_queue_tail(xmitq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		if (!tsk_peer_msg(tsk, hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		tipc_node_remove_conn(net, pnode, tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		pr_err("Unknown sk_state %u\n", sk->sk_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	/* Abort connection setup attempt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	tipc_set_sk_state(sk, TIPC_DISCONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	sk->sk_err = ECONNREFUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)  * rcvbuf_limit - get proper overload limit of socket receive queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)  * @skb: message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)  * For connection oriented messages, irrespective of importance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)  * default queue limit is 2 MB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)  * For connectionless messages, queue limits are based on message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)  * importance as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)  * TIPC_LOW_IMPORTANCE       (2 MB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)  * TIPC_MEDIUM_IMPORTANCE    (4 MB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)  * TIPC_HIGH_IMPORTANCE      (8 MB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)  * TIPC_CRITICAL_IMPORTANCE  (16 MB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)  * Returns overload limit according to corresponding message importance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	struct tipc_msg *hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	if (unlikely(msg_in_group(hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		return READ_ONCE(sk->sk_rcvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	if (unlikely(!msg_connected(hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		return READ_ONCE(sk->sk_rcvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	return FLOWCTL_MSG_LIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)  * tipc_sk_filter_rcv - validate incoming message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)  * @skb: pointer to message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)  * Enqueues message on receive queue if acceptable; optionally handles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)  * disconnect indication for a connected socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)  * Called with socket lock already taken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 			       struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	bool sk_conn = !tipc_sk_type_connectionless(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	struct tipc_group *grp = tsk->group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	struct tipc_msg *hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	struct sk_buff_head inputq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	int mtyp = msg_type(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	int limit, err = TIPC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	TIPC_SKB_CB(skb)->bytes_read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	__skb_queue_head_init(&inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	__skb_queue_tail(&inputq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	if (unlikely(!msg_isdata(hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		tipc_sk_proto_rcv(sk, &inputq, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	if (unlikely(grp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		tipc_group_filter_msg(grp, &inputq, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	/* Validate and add to receive buffer if there is space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	while ((skb = __skb_dequeue(&inputq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		limit = rcvbuf_limit(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		    (!sk_conn && msg_connected(hdr)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		    (!grp && msg_in_group(hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 			err = TIPC_ERR_NO_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 					   "err_overload2!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 			atomic_inc(&sk->sk_drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 			err = TIPC_ERR_OVERLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 			if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 						      "@filter_rcv!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 				__skb_queue_tail(xmitq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 			err = TIPC_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 		__skb_queue_tail(&sk->sk_receive_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 		skb_set_owner_r(skb, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 					 "rcvq >90% allocated!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 		sk->sk_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)  * tipc_sk_backlog_rcv - handle incoming message from backlog queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)  * @skb: message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)  * Caller must hold socket lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	unsigned int before = sk_rmem_alloc_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	struct sk_buff_head xmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	unsigned int added;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	__skb_queue_head_init(&xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	tipc_sk_filter_rcv(sk, skb, &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	added = sk_rmem_alloc_get(sk) - before;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	/* Send pending response/rejected messages, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	tipc_node_distr_xmit(sock_net(sk), &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)  * tipc_sk_enqueue - extract all buffers with destination 'dport' from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)  *                   inputq and try adding them to socket or backlog queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)  * @inputq: list of incoming buffers with potentially different destinations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)  * @sk: socket where the buffers should be enqueued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)  * @dport: port number for the socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)  * Caller must hold socket lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 			    u32 dport, struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	unsigned int lim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	atomic_t *dcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	u32 onode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	while (skb_queue_len(inputq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		if (unlikely(time_after_eq(jiffies, time_limit)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		skb = tipc_skb_dequeue(inputq, dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		if (unlikely(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		/* Add message directly to receive queue if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		if (!sock_owned_by_user(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 			tipc_sk_filter_rcv(sk, skb, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		/* Try backlog, compensating for double-counted bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		if (!sk->sk_backlog.len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 			atomic_set(dcnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		if (likely(!sk_add_backlog(sk, skb, lim))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 			trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 						 "bklg & rcvq >90% allocated!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 		/* Overload => reject message back to sender */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 		onode = tipc_own_addr(sock_net(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		atomic_inc(&sk->sk_drops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 			trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 					      "@sk_enqueue!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 			__skb_queue_tail(xmitq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)  * tipc_sk_rcv - handle a chain of incoming buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)  * @inputq: buffer list containing the buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)  * Consumes all buffers in list until inputq is empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)  * Note: may be called in multiple threads referring to the same queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	struct sk_buff_head xmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	u32 dnode, dport = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	struct tipc_sock *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	__skb_queue_head_init(&xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	while (skb_queue_len(inputq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 		dport = tipc_skb_peek_port(inputq, dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		tsk = tipc_sk_lookup(net, dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 		if (likely(tsk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 			sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 				spin_unlock_bh(&sk->sk_lock.slock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 			/* Send pending response/rejected messages, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 			tipc_node_distr_xmit(sock_net(sk), &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 			sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		/* No destination socket => dequeue skb if still there */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 		skb = tipc_skb_dequeue(inputq, dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 		if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 		/* Try secondary lookup if unresolved named message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		err = TIPC_ERR_NO_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 		if (tipc_msg_lookup_dest(net, skb, &err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 			goto xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		/* Prepare for message rejection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 		trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) xmit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 		dnode = msg_destnode(buf_msg(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 		tipc_node_xmit_skb(net, skb, dnode, dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	int done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		int err = sock_error(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 		if (!*timeo_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 			return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 			return sock_intr_errno(*timeo_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		if (sk->sk_state == TIPC_DISCONNECTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		add_wait_queue(sk_sleep(sk), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 				     &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 		remove_wait_queue(sk_sleep(sk), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	} while (!done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	if (addr->family != AF_TIPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	if (addr->addrtype == TIPC_SERVICE_RANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	return (addr->addrtype == TIPC_SERVICE_ADDR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		addr->addrtype == TIPC_SOCKET_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)  * tipc_connect - establish a connection to another TIPC port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)  * @dest: socket address for destination port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)  * @destlen: size of socket address data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)  * @flags: file-related flags associated with socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) static int tipc_connect(struct socket *sock, struct sockaddr *dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 			int destlen, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	struct msghdr m = {NULL,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	int previous;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	if (destlen != sizeof(struct sockaddr_tipc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	if (tsk->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	if (dst->family == AF_UNSPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		if (!tipc_sk_type_connectionless(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 			res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	if (!tipc_sockaddr_is_sane(dst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	/* DGRAM/RDM connect(), just save the destaddr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	if (tipc_sk_type_connectionless(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		memcpy(&tsk->peer, dest, destlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	} else if (dst->addrtype == TIPC_SERVICE_RANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 		res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	previous = sk->sk_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	switch (sk->sk_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	case TIPC_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		/* Send a 'SYN-' to destination */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		m.msg_name = dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		m.msg_namelen = destlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		/* If connect is in non-blocking case, set MSG_DONTWAIT to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		 * indicate send_msg() is never blocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		if (!timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 			m.msg_flags = MSG_DONTWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		res = __tipc_sendmsg(sock, &m, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		if ((res < 0) && (res != -EWOULDBLOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 		/* Just entered TIPC_CONNECTING state; the only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 		 * difference is that return value in non-blocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		 * case is EINPROGRESS, rather than EALREADY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 		res = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	case TIPC_CONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 		if (!timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 			if (previous == TIPC_CONNECTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 				res = -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 			goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 		timeout = msecs_to_jiffies(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		res = tipc_wait_for_connect(sock, &timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	case TIPC_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		res = -EISCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)  * tipc_listen - allow socket to listen for incoming connections
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)  * @len: (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) static int tipc_listen(struct socket *sock, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	res = tipc_set_sk_state(sk, TIPC_LISTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) static int tipc_wait_for_accept(struct socket *sock, long timeo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	/* True wake-one mechanism for incoming connections: only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	 * one process gets woken up, not the 'whole herd'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	 * Since we do not 'race & poll' for established sockets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	 * anymore, the common case will execute the loop only once.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 			add_wait_queue(sk_sleep(sk), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 			release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 			lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 			remove_wait_queue(sk_sleep(sk), &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		if (!skb_queue_empty(&sk->sk_receive_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 		if (!timeo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		err = sock_intr_errno(timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)  * tipc_accept - wait for connection request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)  * @sock: listening socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)  * @new_sock: new socket that is to be connected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)  * @flags: file-related flags associated with socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		       bool kern)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	struct sock *new_sk, *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	struct tipc_sock *new_tsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	struct msghdr m = {NULL,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	struct tipc_msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	struct sk_buff *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	long timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	if (sk->sk_state != TIPC_LISTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	res = tipc_wait_for_accept(sock, timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	buf = skb_peek(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	security_sk_clone(sock->sk, new_sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	new_sk = new_sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	new_tsock = tipc_sk(new_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	msg = buf_msg(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	/* we lock on new_sk; but lockdep sees the lock on sk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	 * Reject any stray messages received by new socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	 * before the socket lock was taken (very, very unlikely)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	/* Connect new socket to it's peer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	tsk_set_importance(new_sk, msg_importance(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	if (msg_named(msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		new_tsock->conn_type = msg_nametype(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		new_tsock->conn_instance = msg_nameinst(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	 * Respond to 'SYN-' by discarding it & returning 'ACK'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	 * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	if (!msg_data_sz(msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 		tsk_advance_rx_queue(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 		__skb_dequeue(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 		__skb_queue_head(&new_sk->sk_receive_queue, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		skb_set_owner_r(buf, new_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	__tipc_sendstream(new_sock, &m, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	release_sock(new_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)  * tipc_shutdown - shutdown socket connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)  * @how: direction to close (must be SHUT_RDWR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)  * Terminates connection (if necessary), then purges socket's receive queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) static int tipc_shutdown(struct socket *sock, int how)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	if (how != SHUT_RDWR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	sk->sk_shutdown = SHUTDOWN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	if (sk->sk_state == TIPC_DISCONNECTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		/* Discard any unreceived messages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 		__skb_queue_purge(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		res = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	/* Wake up anyone sleeping in poll. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) static void tipc_sk_check_probing_state(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 					struct sk_buff_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 	u32 pnode = tsk_peer_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	u32 pport = tsk_peer_port(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	u32 self = tsk_own_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	u32 oport = tsk->portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	if (tsk->probe_unacked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		sk->sk_err = ECONNABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		tipc_node_remove_conn(sock_net(sk), pnode, pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 		sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	/* Prepare new probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 			      pnode, self, pport, oport, TIPC_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 		__skb_queue_tail(list, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	tsk->probe_unacked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	/* Try again later if dest link is congested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	if (tsk->cong_link_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 		sk_reset_timer(sk, &sk->sk_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 			       jiffies + msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	/* Prepare SYN for retransmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	tipc_msg_skb_clone(&sk->sk_write_queue, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) static void tipc_sk_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	struct sock *sk = from_timer(sk, t, sk_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	u32 pnode = tsk_peer_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	struct sk_buff_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	__skb_queue_head_init(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 	/* Try again later if socket is busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	if (sock_owned_by_user(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 		bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 		sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	if (sk->sk_state == TIPC_ESTABLISHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 		tipc_sk_check_probing_state(sk, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	else if (sk->sk_state == TIPC_CONNECTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		tipc_sk_retry_connect(sk, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	if (!skb_queue_empty(&list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 		rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	/* SYN messages may cause link congestion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	if (rc == -ELINKCONG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 		tipc_dest_push(&tsk->cong_links, pnode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 		tsk->cong_link_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 			   struct tipc_name_seq const *seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	struct publication *publ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	u32 key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	if (scope != TIPC_NODE_SCOPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 		scope = TIPC_CLUSTER_SCOPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	if (tipc_sk_connected(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	key = tsk->portid + tsk->pub_count + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	if (key == tsk->portid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 		return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 				    scope, tsk->portid, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 	if (unlikely(!publ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 	list_add(&publ->binding_sock, &tsk->publications);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	tsk->pub_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	tsk->published = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 			    struct tipc_name_seq const *seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 	struct net *net = sock_net(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	struct publication *publ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	struct publication *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 	int rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	if (scope != TIPC_NODE_SCOPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 		scope = TIPC_CLUSTER_SCOPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		if (seq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 			if (publ->scope != scope)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 			if (publ->type != seq->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 			if (publ->lower != seq->lower)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 			if (publ->upper != seq->upper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 			tipc_nametbl_withdraw(net, publ->type, publ->lower,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 					      publ->upper, publ->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 			rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 		tipc_nametbl_withdraw(net, publ->type, publ->lower,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 				      publ->upper, publ->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	if (list_empty(&tsk->publications))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 		tsk->published = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) /* tipc_sk_reinit: set non-zero address in all existing sockets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)  *                 when we go from standalone to network mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) void tipc_sk_reinit(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 	struct tipc_net *tn = net_generic(net, tipc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	struct rhashtable_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	struct tipc_sock *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	struct tipc_msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	rhashtable_walk_enter(&tn->sk_rht, &iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 		rhashtable_walk_start(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 			sock_hold(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 			rhashtable_walk_stop(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 			lock_sock(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 			msg = &tsk->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 			msg_set_prevnode(msg, tipc_own_addr(net));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 			msg_set_orignode(msg, tipc_own_addr(net));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 			release_sock(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 			rhashtable_walk_start(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 			sock_put(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 		rhashtable_walk_stop(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	} while (tsk == ERR_PTR(-EAGAIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	rhashtable_walk_exit(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	struct tipc_net *tn = net_generic(net, tipc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	struct tipc_sock *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	if (tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 		sock_hold(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	return tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) static int tipc_sk_insert(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	struct tipc_net *tn = net_generic(net, tipc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	while (remaining--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 		portid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 			portid = TIPC_MIN_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 		tsk->portid = portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 		sock_hold(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 						   tsk_rht_params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 		sock_put(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) static void tipc_sk_remove(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 		__sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) static const struct rhashtable_params tsk_rht_params = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	.nelem_hint = 192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	.head_offset = offsetof(struct tipc_sock, node),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	.key_offset = offsetof(struct tipc_sock, portid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	.key_len = sizeof(u32), /* portid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	.max_size = 1048576,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	.min_size = 256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	.automatic_shrinking = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) int tipc_sk_rht_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	struct tipc_net *tn = net_generic(net, tipc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) void tipc_sk_rht_destroy(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	struct tipc_net *tn = net_generic(net, tipc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	/* Wait for socket readers to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	synchronize_net();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	rhashtable_destroy(&tn->sk_rht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 	struct net *net = sock_net(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	struct tipc_group *grp = tsk->group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 	struct tipc_msg *hdr = &tsk->phdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 	struct tipc_name_seq seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	if (mreq->type < TIPC_RESERVED_TYPES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	if (mreq->scope > TIPC_NODE_SCOPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	if (grp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	if (!grp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 	tsk->group = grp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	msg_set_lookup_scope(hdr, mreq->scope);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	msg_set_nametype(hdr, mreq->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	msg_set_dest_droppable(hdr, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	seq.type = mreq->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	seq.lower = mreq->instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	seq.upper = seq.lower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 		tipc_group_delete(net, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 		tsk->group = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	/* Eliminate any risk that a broadcast overtakes sent JOINs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	tsk->mc_method.rcast = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	tsk->mc_method.mandatory = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) static int tipc_sk_leave(struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	struct net *net = sock_net(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 	struct tipc_group *grp = tsk->group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	struct tipc_name_seq seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	int scope;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	if (!grp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	tipc_group_self(grp, &seq, &scope);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	tipc_group_delete(net, grp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	tsk->group = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	tipc_sk_withdraw(tsk, scope, &seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)  * tipc_setsockopt - set socket option
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)  * @lvl: option level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)  * @opt: option identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)  * @ov: pointer to new option value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)  * @ol: length of option value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)  * For stream sockets only, accepts and ignores all IPPROTO_TCP options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)  * (to ease compatibility).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 			   sockptr_t ov, unsigned int ol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 	struct tipc_group_req mreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	u32 value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 	if (lvl != SOL_TIPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	switch (opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	case TIPC_IMPORTANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	case TIPC_SRC_DROPPABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	case TIPC_DEST_DROPPABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	case TIPC_CONN_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	case TIPC_NODELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		if (ol < sizeof(value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 		if (copy_from_sockptr(&value, ov, sizeof(u32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	case TIPC_GROUP_JOIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 		if (ol < sizeof(mreq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		if (!sockptr_is_null(ov) || ol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	switch (opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	case TIPC_IMPORTANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 		res = tsk_set_importance(sk, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	case TIPC_SRC_DROPPABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 		if (sock->type != SOCK_STREAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 			tsk_set_unreliable(tsk, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 			res = -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	case TIPC_DEST_DROPPABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		tsk_set_unreturnable(tsk, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	case TIPC_CONN_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 		tipc_sk(sk)->conn_timeout = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 	case TIPC_MCAST_BROADCAST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 		tsk->mc_method.rcast = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 		tsk->mc_method.mandatory = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 	case TIPC_MCAST_REPLICAST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 		tsk->mc_method.rcast = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 		tsk->mc_method.mandatory = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	case TIPC_GROUP_JOIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 		res = tipc_sk_join(tsk, &mreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	case TIPC_GROUP_LEAVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 		res = tipc_sk_leave(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	case TIPC_NODELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 		tsk->nodelay = !!value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 		tsk_set_nagle(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 		res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214)  * tipc_getsockopt - get socket option
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)  * @sock: socket structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216)  * @lvl: option level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)  * @opt: option identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)  * @ov: receptacle for option value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)  * @ol: receptacle for length of option value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)  * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)  * (to ease compatibility).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 			   char __user *ov, int __user *ol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 	struct tipc_sock *tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	struct tipc_name_seq seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	int len, scope;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 		return put_user(0, ol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 	if (lvl != SOL_TIPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 	res = get_user(len, ol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 	if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 		return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	switch (opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	case TIPC_IMPORTANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 		value = tsk_importance(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	case TIPC_SRC_DROPPABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 		value = tsk_unreliable(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	case TIPC_DEST_DROPPABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 		value = tsk_unreturnable(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	case TIPC_CONN_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 		value = tsk->conn_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 		/* no need to set "res", since already 0 at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 	case TIPC_NODE_RECVQ_DEPTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 		value = 0; /* was tipc_queue_size, now obsolete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	case TIPC_SOCK_RECVQ_DEPTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 		value = skb_queue_len(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	case TIPC_SOCK_RECVQ_USED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 		value = sk_rmem_alloc_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	case TIPC_GROUP_JOIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 		seq.type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 		if (tsk->group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 			tipc_group_self(tsk->group, &seq, &scope);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 		value = seq.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 		res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		return res;	/* "get" failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	if (len < sizeof(value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	if (copy_to_user(ov, &value, sizeof(value)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 	return put_user(sizeof(value), ol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	struct net *net = sock_net(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	struct tipc_sioc_nodeid_req nr = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 	struct tipc_sioc_ln_req lnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	case SIOCGETLINKNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 		if (copy_from_user(&lnr, argp, sizeof(lnr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 		if (!tipc_node_get_linkname(net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 					    lnr.bearer_id & 0xffff, lnr.peer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 			if (copy_to_user(argp, &lnr, sizeof(lnr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 				return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 		return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 	case SIOCGETNODEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 		if (copy_from_user(&nr, argp, sizeof(nr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		if (!tipc_node_get_id(net, nr.peer, nr.node_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 			return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		if (copy_to_user(argp, &nr, sizeof(nr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 		return -ENOIOCTLCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	u32 onode = tipc_own_addr(sock_net(sock1->sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 	tsk1->peer.family = AF_TIPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	tsk1->peer.addrtype = TIPC_ADDR_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 	tsk1->peer.scope = TIPC_NODE_SCOPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 	tsk1->peer.addr.id.ref = tsk2->portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	tsk1->peer.addr.id.node = onode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 	tsk2->peer.family = AF_TIPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 	tsk2->peer.addrtype = TIPC_ADDR_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	tsk2->peer.scope = TIPC_NODE_SCOPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	tsk2->peer.addr.id.ref = tsk1->portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 	tsk2->peer.addr.id.node = onode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) /* Protocol switches for the various types of TIPC sockets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) static const struct proto_ops msg_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	.family		= AF_TIPC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	.release	= tipc_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 	.bind		= tipc_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 	.connect	= tipc_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 	.socketpair	= tipc_socketpair,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 	.accept		= sock_no_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 	.getname	= tipc_getname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 	.poll		= tipc_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 	.ioctl		= tipc_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 	.listen		= sock_no_listen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	.shutdown	= tipc_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 	.setsockopt	= tipc_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	.getsockopt	= tipc_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	.sendmsg	= tipc_sendmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 	.recvmsg	= tipc_recvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 	.mmap		= sock_no_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 	.sendpage	= sock_no_sendpage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) static const struct proto_ops packet_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 	.family		= AF_TIPC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	.release	= tipc_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 	.bind		= tipc_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	.connect	= tipc_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 	.socketpair	= tipc_socketpair,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 	.accept		= tipc_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 	.getname	= tipc_getname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	.poll		= tipc_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	.ioctl		= tipc_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	.listen		= tipc_listen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	.shutdown	= tipc_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	.setsockopt	= tipc_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	.getsockopt	= tipc_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 	.sendmsg	= tipc_send_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	.recvmsg	= tipc_recvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 	.mmap		= sock_no_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	.sendpage	= sock_no_sendpage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) static const struct proto_ops stream_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 	.family		= AF_TIPC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	.release	= tipc_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 	.bind		= tipc_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 	.connect	= tipc_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 	.socketpair	= tipc_socketpair,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 	.accept		= tipc_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 	.getname	= tipc_getname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	.poll		= tipc_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	.ioctl		= tipc_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	.listen		= tipc_listen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 	.shutdown	= tipc_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 	.setsockopt	= tipc_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	.getsockopt	= tipc_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 	.sendmsg	= tipc_sendstream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 	.recvmsg	= tipc_recvstream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	.mmap		= sock_no_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 	.sendpage	= sock_no_sendpage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) static const struct net_proto_family tipc_family_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	.family		= AF_TIPC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	.create		= tipc_sk_create
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) static struct proto tipc_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	.name		= "TIPC",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	.obj_size	= sizeof(struct tipc_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 	.sysctl_rmem	= sysctl_tipc_rmem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)  * tipc_socket_init - initialize TIPC socket interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)  * Returns 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) int tipc_socket_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	res = proto_register(&tipc_proto, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 		pr_err("Failed to register TIPC protocol type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	res = sock_register(&tipc_family_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		pr_err("Failed to register TIPC socket type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 		proto_unregister(&tipc_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)  * tipc_socket_stop - stop TIPC socket interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) void tipc_socket_stop(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 	sock_unregister(tipc_family_ops.family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	proto_unregister(&tipc_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) /* Caller should hold socket lock for the passed tipc socket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 	u32 peer_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	u32 peer_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 	struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 	peer_node = tsk_peer_node(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 	peer_port = tsk_peer_port(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 	nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 	if (!nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 		goto msg_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 		goto msg_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 	if (tsk->conn_type != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 			goto msg_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 			goto msg_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 			goto msg_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 	nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) msg_full:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 	nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 			  *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	if (tipc_sk_connected(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 		if (__tipc_nl_add_sk_con(skb, tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 			return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	} else if (!list_empty(&tsk->publications)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 			return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) /* Caller should hold socket lock for the passed tipc socket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 			    struct tipc_sock *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 	struct nlattr *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	void *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 		goto msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	if (!attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 		goto genlmsg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	if (__tipc_nl_add_sk_info(skb, tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 		goto attr_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 	nla_nest_end(skb, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 	genlmsg_end(skb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) attr_msg_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	nla_nest_cancel(skb, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) genlmsg_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 	genlmsg_cancel(skb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) msg_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 		    int (*skb_handler)(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 				       struct netlink_callback *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 				       struct tipc_sock *tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	struct rhashtable_iter *iter = (void *)cb->args[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	struct tipc_sock *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 	rhashtable_walk_start(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 	while ((tsk = rhashtable_walk_next(iter)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 		if (IS_ERR(tsk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 			err = PTR_ERR(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 			if (err == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 				err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 		sock_hold(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 		rhashtable_walk_stop(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 		lock_sock(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 		err = skb_handler(skb, cb, tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 			release_sock(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 			sock_put(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 		release_sock(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 		rhashtable_walk_start(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 		sock_put(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	rhashtable_walk_stop(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) EXPORT_SYMBOL(tipc_nl_sk_walk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) int tipc_dump_start(struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	return __tipc_dump_start(cb, sock_net(cb->skb->sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) EXPORT_SYMBOL(tipc_dump_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	/* tipc_nl_name_table_dump() uses cb->args[0...3]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 	struct rhashtable_iter *iter = (void *)cb->args[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 	struct tipc_net *tn = tipc_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	if (!iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 		if (!iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 		cb->args[4] = (long)iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 	rhashtable_walk_enter(&tn->sk_rht, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) int tipc_dump_done(struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	struct rhashtable_iter *hti = (void *)cb->args[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	rhashtable_walk_exit(hti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	kfree(hti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) EXPORT_SYMBOL(tipc_dump_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 			   struct tipc_sock *tsk, u32 sk_filter_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	struct sock *sk = &tsk->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 	struct nlattr *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	struct nlattr *stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	/*filter response w.r.t sk_state*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	if (!(sk_filter_state & (1 << sk->sk_state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 	if (!attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 		goto msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	if (__tipc_nl_add_sk_info(skb, tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 		goto attr_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 					 sock_i_uid(sk))) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 			      tipc_diag_gen_cookie(sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 			      TIPC_NLA_SOCK_PAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 		goto attr_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	if (!stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 		goto attr_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 			skb_queue_len(&sk->sk_receive_queue)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 			skb_queue_len(&sk->sk_write_queue)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 			atomic_read(&sk->sk_drops)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 		goto stat_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 	if (tsk->cong_link_cnt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 		goto stat_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 	if (tsk_conn_cong(tsk) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 		goto stat_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	nla_nest_end(skb, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	if (tsk->group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 		if (tipc_group_fill_sock_diag(tsk->group, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 			goto stat_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 	nla_nest_end(skb, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) stat_msg_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 	nla_nest_cancel(skb, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) attr_msg_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	nla_nest_cancel(skb, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) msg_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) /* Caller should hold socket lock for the passed tipc socket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 				 struct netlink_callback *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 				 struct publication *publ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 	void *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	struct nlattr *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 		goto msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 	attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 	if (!attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 		goto genlmsg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 		goto attr_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 		goto attr_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 		goto attr_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 		goto attr_msg_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 	nla_nest_end(skb, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 	genlmsg_end(skb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) attr_msg_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 	nla_nest_cancel(skb, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) genlmsg_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	genlmsg_cancel(skb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) msg_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) /* Caller should hold socket lock for the passed tipc socket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 				  struct netlink_callback *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 				  struct tipc_sock *tsk, u32 *last_publ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 	struct publication *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 	if (*last_publ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 		list_for_each_entry(p, &tsk->publications, binding_sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 			if (p->key == *last_publ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 		if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 			/* We never set seq or call nl_dump_check_consistent()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 			 * this means that setting prev_seq here will cause the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 			 * consistence check to fail in the netlink callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 			 * handler. Resulting in the last NLMSG_DONE message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 			 * having the NLM_F_DUMP_INTR flag set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 			cb->prev_seq = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 			*last_publ = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 			return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 		p = list_first_entry(&tsk->publications, struct publication,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 				     binding_sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 		err = __tipc_nl_add_sk_publ(skb, cb, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 			*last_publ = p->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 	*last_publ = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 	u32 tsk_portid = cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	u32 last_publ = cb->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 	u32 done = cb->args[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 	struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 	struct tipc_sock *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 	if (!tsk_portid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 		struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 		if (!attrs[TIPC_NLA_SOCK])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 		err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 						  attrs[TIPC_NLA_SOCK],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 						  tipc_nl_sock_policy, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 		if (!sock[TIPC_NLA_SOCK_REF])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 	if (done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 	tsk = tipc_sk_lookup(net, tsk_portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 	if (!tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 	lock_sock(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 		done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 	release_sock(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	sock_put(&tsk->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 	cb->args[0] = tsk_portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 	cb->args[1] = last_publ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 	cb->args[2] = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 	return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825)  * tipc_sk_filtering - check if a socket should be traced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826)  * @sk: the socket to be examined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827)  * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)  *  (portid, sock type, name type, name lower, name upper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)  * Returns true if the socket meets the socket tuple data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)  * (value 0 = 'any') or when there is no tuple set (all = 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)  * otherwise false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) bool tipc_sk_filtering(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 	struct tipc_sock *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 	struct publication *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 	u32 _port, _sktype, _type, _lower, _upper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 	u32 type = 0, lower = 0, upper = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 	if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 	tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 	_port = sysctl_tipc_sk_filter[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 	_sktype = sysctl_tipc_sk_filter[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 	_type = sysctl_tipc_sk_filter[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 	_lower = sysctl_tipc_sk_filter[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 	_upper = sysctl_tipc_sk_filter[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 	if (!_port && !_sktype && !_type && !_lower && !_upper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 	if (_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 		return (_port == tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 	if (_sktype && _sktype != sk->sk_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 	if (tsk->published) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 		p = list_first_entry_or_null(&tsk->publications,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 					     struct publication, binding_sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 		if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 			type = p->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 			lower = p->lower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 			upper = p->upper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 	if (!tipc_sk_type_connectionless(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 		type = tsk->conn_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 		lower = tsk->conn_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 		upper = tsk->conn_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 	if ((_type && _type != type) || (_lower && _lower != lower) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 	    (_upper && _upper != upper))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) u32 tipc_sock_get_portid(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 	return (sk) ? (tipc_sk(sk))->portid : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)  * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891)  *			both the rcv and backlog queues are considered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892)  * @sk: tipc sk to be checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893)  * @skb: tipc msg to be checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895)  * Returns true if the socket rx queue allocation is > 90%, otherwise false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 	atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 	unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 	unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 	return (qsize > lim * 90 / 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)  * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909)  *			only the rcv queue is considered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910)  * @sk: tipc sk to be checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911)  * @skb: tipc msg to be checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913)  * Returns true if the socket rx queue allocation is > 90%, otherwise false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 	unsigned int lim = rcvbuf_limit(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 	unsigned int qsize = sk_rmem_alloc_get(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 	return (qsize > lim * 90 / 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)  * tipc_sk_dump - dump TIPC socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926)  * @sk: tipc sk to be dumped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927)  * @dqueues: bitmask to decide if any socket queue to be dumped?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928)  *           - TIPC_DUMP_NONE: don't dump socket queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929)  *           - TIPC_DUMP_SK_SNDQ: dump socket send queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930)  *           - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931)  *           - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932)  *           - TIPC_DUMP_ALL: dump all the socket queues above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933)  * @buf: returned buffer of dump data in format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 	size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 	struct tipc_sock *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 	struct publication *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 	bool tsk_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 	if (!sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 		i += scnprintf(buf, sz, "sk data: (null)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 		return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 	tsk = tipc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	tsk_connected = !tipc_sk_type_connectionless(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 	i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 	i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 	i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 	i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 	i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 	if (tsk_connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 		i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 		i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 		i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 		i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 	i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 	if (tsk->published) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 		p = list_first_entry_or_null(&tsk->publications,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 					     struct publication, binding_sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 		i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 	i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 	i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 	i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 	i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 	i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 	i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 	i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 	i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 	i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 	i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 	if (dqueues & TIPC_DUMP_SK_SNDQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 		i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 		i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 	if (dqueues & TIPC_DUMP_SK_RCVQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 		i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 		i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 	if (dqueues & TIPC_DUMP_SK_BKLGQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 		i += scnprintf(buf + i, sz - i, "sk_backlog:\n  head ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 		i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 		if (sk->sk_backlog.tail != sk->sk_backlog.head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 			i += scnprintf(buf + i, sz - i, "  tail ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 			i += tipc_skb_dump(sk->sk_backlog.tail, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 					   buf + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) }