^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * net/tipc/node.c: TIPC node management routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * modification, are permitted provided that the following conditions are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * 1. Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * 2. Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * notice, this list of conditions and the following disclaimer in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * documentation and/or other materials provided with the distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * 3. Neither the names of the copyright holders nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * contributors may be used to endorse or promote products derived from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * Alternatively, this software may be distributed under the terms of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * GNU General Public License ("GPL") version 2 as published by the Free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include "core.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include "link.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include "node.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include "name_distr.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include "socket.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include "bcast.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include "monitor.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include "discover.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include "netlink.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include "crypto.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define INVALID_NODE_SIG 0x10000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define NODE_CLEANUP_AFTER 300000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) /* Flags used to take different actions according to flag type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * TIPC_NOTIFY_NODE_DOWN: notify node is down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * TIPC_NOTIFY_NODE_UP: notify node is up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) TIPC_NOTIFY_NODE_DOWN = (1 << 3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) TIPC_NOTIFY_NODE_UP = (1 << 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) TIPC_NOTIFY_LINK_UP = (1 << 6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) TIPC_NOTIFY_LINK_DOWN = (1 << 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) struct tipc_link_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) struct tipc_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) spinlock_t lock; /* per link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) u32 mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) struct sk_buff_head inputq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) struct tipc_media_addr maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) struct tipc_bclink_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) struct tipc_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) struct sk_buff_head inputq1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) struct sk_buff_head arrvq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) struct sk_buff_head inputq2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) struct sk_buff_head namedq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) u16 named_rcv_nxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) bool named_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * struct tipc_node - TIPC node structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * @addr: network address of node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * @ref: reference counter to node object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * @lock: rwlock governing access to structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * @net: the applicable net namespace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * @hash: links to adjacent nodes in unsorted hash chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * @inputq: pointer to input queue containing messages for msg event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * @namedq: pointer to name table input queue with name table messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * @active_links: bearer ids of active links, used as index into links[] array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * @links: array containing references to all links to node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * @action_flags: bit mask of different types of node actions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * @state: connectivity state vs peer node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * @preliminary: a preliminary node or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * @sync_point: sequence number where synch/failover is finished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * @list: links to adjacent nodes in sorted list of cluster's nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * @working_links: number of working links to node (both active and standby)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * @link_cnt: number of links to node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * @capabilities: bitmap, indicating peer node's functional capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * @signature: node instance identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * @link_id: local and remote bearer ids of changing link, if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * @publ_list: list of publications
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * @rcu: rcu struct for tipc_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * @delete_at: indicates the time for deleting a down node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * @crypto_rx: RX crypto handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) struct tipc_node {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) struct kref kref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) rwlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) struct net *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) struct hlist_node hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) int active_links[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) struct tipc_link_entry links[MAX_BEARERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) struct tipc_bclink_entry bc_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) int action_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) bool preliminary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) bool failover_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) u16 sync_point;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) int link_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) u16 working_links;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) u16 capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) u32 signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) u32 link_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) u8 peer_id[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) char peer_id_string[NODE_ID_STR_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) struct list_head publ_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) struct list_head conn_sks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) unsigned long keepalive_intv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) struct timer_list timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct rcu_head rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) unsigned long delete_at;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) struct net *peer_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) u32 peer_hash_mix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #ifdef CONFIG_TIPC_CRYPTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) struct tipc_crypto *crypto_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) /* Node FSM states and events:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) SELF_DOWN_PEER_DOWN = 0xdd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) SELF_UP_PEER_UP = 0xaa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) SELF_DOWN_PEER_LEAVING = 0xd1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) SELF_UP_PEER_COMING = 0xac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) SELF_COMING_PEER_UP = 0xca,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) SELF_LEAVING_PEER_DOWN = 0x1d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) NODE_FAILINGOVER = 0xf0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) NODE_SYNCHING = 0xcc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) SELF_ESTABL_CONTACT_EVT = 0xece,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) SELF_LOST_CONTACT_EVT = 0x1ce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) PEER_ESTABL_CONTACT_EVT = 0x9ece,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) PEER_LOST_CONTACT_EVT = 0x91ce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) NODE_FAILOVER_BEGIN_EVT = 0xfbe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) NODE_FAILOVER_END_EVT = 0xfee,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) NODE_SYNCH_BEGIN_EVT = 0xcbe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) NODE_SYNCH_END_EVT = 0xcee
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) struct sk_buff_head *xmitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) struct tipc_media_addr **maddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) bool delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) static void tipc_node_delete(struct tipc_node *node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) static void tipc_node_timeout(struct timer_list *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) static bool node_is_up(struct tipc_node *n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) static void tipc_node_delete_from_list(struct tipc_node *node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) struct tipc_sock_conn {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) u32 port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) u32 peer_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) u32 peer_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) int bearer_id = n->active_links[sel & 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) if (unlikely(bearer_id == INVALID_BEARER_ID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) return n->links[bearer_id].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) int bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) unsigned int mtu = MAX_MSG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) n = tipc_node_find(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) if (unlikely(!n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) return mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) /* Allow MAX_MSG_SIZE when building connection oriented message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * if they are in the same core network
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) if (n->peer_net && connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) return mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) bearer_id = n->active_links[sel & 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) if (likely(bearer_id != INVALID_BEARER_ID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) mtu = n->links[bearer_id].mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) return mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) u8 *own_id = tipc_own_id(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) if (!own_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) if (addr == tipc_own_addr(net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) memcpy(id, own_id, TIPC_NODEID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) n = tipc_node_find(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) u16 tipc_node_get_capabilities(struct net *net, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) u16 caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) n = tipc_node_find(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) if (unlikely(!n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) return TIPC_NODE_CAPABILITIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) caps = n->capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) return caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) u32 tipc_node_get_addr(struct tipc_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) return (node) ? node->addr : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) char *tipc_node_get_id_str(struct tipc_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) return node->peer_id_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) #ifdef CONFIG_TIPC_CRYPTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * tipc_node_crypto_rx - Retrieve crypto RX handle from node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * Note: node ref counter must be held first!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) return (__n) ? __n->crypto_rx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) return container_of(pos, struct tipc_node, list)->crypto_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) n = tipc_node_find(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) return (n) ? n->crypto_rx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) static void tipc_node_free(struct rcu_head *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) #ifdef CONFIG_TIPC_CRYPTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) tipc_crypto_stop(&n->crypto_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) kfree(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) static void tipc_node_kref_release(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) struct tipc_node *n = container_of(kref, struct tipc_node, kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) kfree(n->bc_entry.link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) call_rcu(&n->rcu, tipc_node_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) void tipc_node_put(struct tipc_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) kref_put(&node->kref, tipc_node_kref_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) void tipc_node_get(struct tipc_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) kref_get(&node->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) * tipc_node_find - locate specified node object, if it exists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) struct tipc_net *tn = tipc_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) struct tipc_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) unsigned int thash = tipc_hashfn(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) if (node->addr != addr || node->preliminary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) if (!kref_get_unless_zero(&node->kref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) /* tipc_node_find_by_id - locate specified node object by its 128-bit id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * Note: this function is called only when a discovery request failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * to find the node by its 32-bit id, and is not time critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) struct tipc_net *tn = tipc_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) list_for_each_entry_rcu(n, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) read_lock_bh(&n->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) if (!memcmp(id, n->peer_id, 16) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) kref_get_unless_zero(&n->kref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) read_unlock_bh(&n->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return found ? n : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) static void tipc_node_read_lock(struct tipc_node *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) read_lock_bh(&n->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) static void tipc_node_read_unlock(struct tipc_node *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) read_unlock_bh(&n->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) static void tipc_node_write_lock(struct tipc_node *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) write_lock_bh(&n->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) static void tipc_node_write_unlock_fast(struct tipc_node *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) write_unlock_bh(&n->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) static void tipc_node_write_unlock(struct tipc_node *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) struct net *net = n->net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) u32 addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) u32 flags = n->action_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) u32 link_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) u32 bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) struct list_head *publ_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) if (likely(!flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) write_unlock_bh(&n->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) addr = n->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) link_id = n->link_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) bearer_id = link_id & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) publ_list = &n->publ_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) write_unlock_bh(&n->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) if (flags & TIPC_NOTIFY_NODE_DOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) tipc_publ_notify(net, publ_list, addr, n->capabilities);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) if (flags & TIPC_NOTIFY_NODE_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) tipc_named_node_up(net, addr, n->capabilities);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if (flags & TIPC_NOTIFY_LINK_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) tipc_mon_peer_up(net, addr, bearer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) TIPC_NODE_SCOPE, link_id, link_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) if (flags & TIPC_NOTIFY_LINK_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) tipc_mon_peer_down(net, addr, bearer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) addr, link_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) int net_id = tipc_netid(n->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) struct tipc_net *tn_peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) struct net *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) u32 hash_chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) if (n->peer_net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) for_each_net_rcu(tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) tn_peer = tipc_net(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (!tn_peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /* Integrity checking whether node exists in namespace or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (tn_peer->net_id != net_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) if (hash_mixes ^ hash_chk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) n->peer_net = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) n->peer_hash_mix = hash_mixes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) u16 capabilities, u32 hash_mixes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) bool preliminary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) struct tipc_net *tn = net_generic(net, tipc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) struct tipc_node *n, *temp_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) struct tipc_link *l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) unsigned long intv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) int bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) spin_lock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) n = tipc_node_find(net, addr) ?:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) tipc_node_find_by_id(net, peer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (!n->preliminary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) goto update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) if (preliminary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) /* A preliminary node becomes "real" now, refresh its data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) tipc_node_write_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) n->preliminary = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) n->addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) hlist_del_rcu(&n->hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) hlist_add_head_rcu(&n->hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) &tn->node_htable[tipc_hashfn(addr)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) list_del_rcu(&n->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (n->addr < temp_node->addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) list_add_tail_rcu(&n->list, &temp_node->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) tipc_node_write_unlock_fast(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) update:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (n->peer_hash_mix ^ hash_mixes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) tipc_node_assign_peer_net(n, hash_mixes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (n->capabilities == capabilities)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) /* Same node may come back with new capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) tipc_node_write_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) n->capabilities = capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) l = n->links[bearer_id].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) tipc_link_update_caps(l, capabilities);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) tipc_node_write_unlock_fast(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) /* Calculate cluster capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) tn->capabilities = TIPC_NODE_CAPABILITIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) tn->capabilities &= temp_node->capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) tipc_bcast_toggle_rcast(net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) (tn->capabilities & TIPC_BCAST_RCAST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) n = kzalloc(sizeof(*n), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (!n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) pr_warn("Node creation failed, no memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) tipc_nodeid2string(n->peer_id_string, peer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) #ifdef CONFIG_TIPC_CRYPTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) kfree(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) n = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) n->addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) n->preliminary = preliminary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) memcpy(&n->peer_id, peer_id, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) n->net = net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) n->peer_net = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) n->peer_hash_mix = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) /* Assign kernel local namespace if exists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) tipc_node_assign_peer_net(n, hash_mixes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) n->capabilities = capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) kref_init(&n->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) rwlock_init(&n->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) INIT_HLIST_NODE(&n->hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) INIT_LIST_HEAD(&n->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) INIT_LIST_HEAD(&n->publ_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) INIT_LIST_HEAD(&n->conn_sks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) skb_queue_head_init(&n->bc_entry.namedq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) skb_queue_head_init(&n->bc_entry.inputq1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) __skb_queue_head_init(&n->bc_entry.arrvq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) skb_queue_head_init(&n->bc_entry.inputq2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) for (i = 0; i < MAX_BEARERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) spin_lock_init(&n->links[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) n->state = SELF_DOWN_PEER_LEAVING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) n->signature = INVALID_NODE_SIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) n->active_links[0] = INVALID_BEARER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) n->active_links[1] = INVALID_BEARER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) n->bc_entry.link = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) tipc_node_get(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) timer_setup(&n->timer, tipc_node_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) /* Start a slow timer anyway, crypto needs it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) n->keepalive_intv = 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (!mod_timer(&n->timer, intv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) tipc_node_get(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) if (n->addr < temp_node->addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) list_add_tail_rcu(&n->list, &temp_node->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) /* Calculate cluster capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) tn->capabilities = TIPC_NODE_CAPABILITIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) tn->capabilities &= temp_node->capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) trace_tipc_node_create(n, true, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) spin_unlock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) return n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) unsigned long tol = tipc_link_tolerance(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) /* Link with lowest tolerance determines timer interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) if (intv < n->keepalive_intv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) n->keepalive_intv = intv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) /* Ensure link's abort limit corresponds to current tolerance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) static void tipc_node_delete_from_list(struct tipc_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) #ifdef CONFIG_TIPC_CRYPTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) tipc_crypto_key_flush(node->crypto_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) list_del_rcu(&node->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) hlist_del_rcu(&node->hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) tipc_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) static void tipc_node_delete(struct tipc_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) trace_tipc_node_delete(node, true, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) tipc_node_delete_from_list(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) del_timer_sync(&node->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) tipc_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) void tipc_node_stop(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) struct tipc_net *tn = tipc_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) struct tipc_node *node, *t_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) spin_lock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) list_for_each_entry_safe(node, t_node, &tn->node_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) tipc_node_delete(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) spin_unlock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) if (in_own_node(net, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) n = tipc_node_find(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) if (!n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) tipc_node_write_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) list_add_tail(subscr, &n->publ_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) tipc_node_write_unlock_fast(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) if (in_own_node(net, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) n = tipc_node_find(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) if (!n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) tipc_node_write_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) list_del_init(subscr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) tipc_node_write_unlock_fast(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) struct tipc_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) struct tipc_sock_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (in_own_node(net, dnode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) node = tipc_node_find(net, dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (!node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) pr_warn("Connecting sock to node 0x%x failed\n", dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (!conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) err = -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) conn->peer_node = dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) conn->port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) conn->peer_port = peer_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) tipc_node_write_lock(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) list_add_tail(&conn->list, &node->conn_sks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) tipc_node_write_unlock(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) tipc_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) struct tipc_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) struct tipc_sock_conn *conn, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (in_own_node(net, dnode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) node = tipc_node_find(net, dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) tipc_node_write_lock(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (port != conn->port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) list_del(&conn->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) kfree(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) tipc_node_write_unlock(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) tipc_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static void tipc_node_clear_links(struct tipc_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) for (i = 0; i < MAX_BEARERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct tipc_link_entry *le = &node->links[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (le->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) kfree(le->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) le->link = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) node->link_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /* tipc_node_cleanup - delete nodes that does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * have active links for NODE_CLEANUP_AFTER time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) static bool tipc_node_cleanup(struct tipc_node *peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct tipc_node *temp_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) struct tipc_net *tn = tipc_net(peer->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) bool deleted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) /* If lock held by tipc_node_stop() the node will be deleted anyway */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (!spin_trylock_bh(&tn->node_list_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) tipc_node_write_lock(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) tipc_node_clear_links(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) tipc_node_delete_from_list(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) deleted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) tipc_node_write_unlock(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (!deleted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) spin_unlock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) return deleted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) /* Calculate cluster capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) tn->capabilities = TIPC_NODE_CAPABILITIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) tn->capabilities &= temp_node->capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) tipc_bcast_toggle_rcast(peer->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) (tn->capabilities & TIPC_BCAST_RCAST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) spin_unlock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return deleted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) /* tipc_node_timeout - handle expiration of node timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static void tipc_node_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct tipc_node *n = from_timer(n, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct tipc_link_entry *le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct sk_buff_head xmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) int remains = n->link_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) int bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) trace_tipc_node_timeout(n, false, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (!node_is_up(n) && tipc_node_cleanup(n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /*Removing the reference of Timer*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) #ifdef CONFIG_TIPC_CRYPTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /* Take any crypto key related actions first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) tipc_crypto_timeout(n->crypto_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) __skb_queue_head_init(&xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Initial node interval to value larger (10 seconds), then it will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * recalculated with link lowest tolerance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) tipc_node_read_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) n->keepalive_intv = 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) tipc_node_read_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) tipc_node_read_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) le = &n->links[bearer_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (le->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) spin_lock_bh(&le->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /* Link tolerance may change asynchronously: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) tipc_node_calculate_timer(n, le->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) rc = tipc_link_timeout(le->link, &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) spin_unlock_bh(&le->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) remains--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) tipc_node_read_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (rc & TIPC_LINK_DOWN_EVT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) tipc_node_link_down(n, bearer_id, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * __tipc_node_link_up - handle addition of link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * Node lock must be held by caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Link becomes active (alone or shared) or standby, depending on its priority.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) int *slot0 = &n->active_links[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) int *slot1 = &n->active_links[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct tipc_link *ol = node_active_link(n, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct tipc_link *nl = n->links[bearer_id].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (!nl || tipc_link_is_up(nl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (!tipc_link_is_up(nl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) n->working_links++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) n->action_flags |= TIPC_NOTIFY_LINK_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) n->link_id = tipc_link_id(nl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /* Leave room for tunnel header when returning 'mtu' to users: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) n->links[bearer_id].mtu = tipc_link_mss(nl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) tipc_bearer_add_dest(n->net, bearer_id, n->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) pr_debug("Established link <%s> on network plane %c\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) tipc_link_name(nl), tipc_link_plane(nl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) trace_tipc_node_link_up(n, true, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) /* Ensure that a STATE message goes first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) tipc_link_build_state_msg(nl, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* First link? => give it both slots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (!ol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) *slot0 = bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) *slot1 = bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) n->action_flags |= TIPC_NOTIFY_NODE_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) tipc_link_set_active(nl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) tipc_bcast_add_peer(n->net, nl, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* Second link => redistribute slots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) *slot0 = bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *slot1 = bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) tipc_link_set_active(nl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) tipc_link_set_active(ol, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) tipc_link_set_active(nl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) *slot1 = bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* Prepare synchronization with first link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * tipc_node_link_up - handle addition of link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * Link becomes active (alone or shared) or standby, depending on its priority.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct tipc_media_addr *maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) tipc_node_write_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) __tipc_node_link_up(n, bearer_id, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) maddr = &n->links[bearer_id].maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) tipc_node_write_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * tipc_node_link_failover() - start failover in case "half-failover"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * This function is only called in a very special situation where link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * failover can be already started on peer node but not on this node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * This can happen when e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * 1. Both links <1A-2A>, <1B-2B> down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * 2. Link endpoint 2A up, but 1A still down (e.g. due to network
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * disturbance, wrong session, etc.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * 3. Link <1B-2B> up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * 5. Node 2 starts failover onto link <1B-2B>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * ==> Node 1 does never start link/node failover!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * @n: tipc node structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * @l: link peer endpoint failingover (- can be NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * @tnl: tunnel link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * @xmitq: queue for messages to be xmited on tnl link later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct tipc_link *tnl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* Avoid to be "self-failover" that can never end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (!tipc_link_is_up(tnl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /* Don't rush, failure link may be in the process of resetting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (l && !tipc_link_is_reset(l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) tipc_link_failover_prepare(l, tnl, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * __tipc_node_link_down - handle loss of link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct sk_buff_head *xmitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct tipc_media_addr **maddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct tipc_link_entry *le = &n->links[*bearer_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) int *slot0 = &n->active_links[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) int *slot1 = &n->active_links[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) int i, highest = 0, prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct tipc_link *l, *_l, *tnl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) l = n->links[*bearer_id].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (!l || tipc_link_is_reset(l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) n->working_links--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) n->link_id = tipc_link_id(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) pr_debug("Lost link <%s> on network plane %c\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) tipc_link_name(l), tipc_link_plane(l));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /* Select new active link if any available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) *slot0 = INVALID_BEARER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) *slot1 = INVALID_BEARER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) for (i = 0; i < MAX_BEARERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) _l = n->links[i].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (!_l || !tipc_link_is_up(_l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (_l == l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) prio = tipc_link_prio(_l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (prio < highest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (prio > highest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) highest = prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *slot0 = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) *slot1 = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) *slot1 = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (!node_is_up(n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (tipc_link_peer_is_down(l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) tipc_link_fsm_evt(l, LINK_RESET_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) tipc_link_reset(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) tipc_link_build_reset_msg(l, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) *maddr = &n->links[*bearer_id].maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) node_lost_contact(n, &le->inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /* There is still a working link => initiate failover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) *bearer_id = n->active_links[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) tnl = n->links[*bearer_id].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) tipc_link_reset(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) tipc_link_fsm_evt(l, LINK_RESET_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) *maddr = &n->links[*bearer_id].maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct tipc_link_entry *le = &n->links[bearer_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct tipc_media_addr *maddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct tipc_link *l = le->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int old_bearer_id = bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct sk_buff_head xmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (!l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) __skb_queue_head_init(&xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) tipc_node_write_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (!tipc_link_is_establishing(l)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* Defuse pending tipc_node_link_up() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) tipc_link_reset(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) tipc_link_fsm_evt(l, LINK_RESET_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (delete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) kfree(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) le->link = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) n->link_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) trace_tipc_node_link_down(n, true, "node link down or deleted!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) tipc_node_write_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (delete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (!skb_queue_empty(&xmitq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) tipc_sk_rcv(n->net, &le->inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static bool node_is_up(struct tipc_node *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return n->active_links[0] != INVALID_BEARER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) bool tipc_node_is_up(struct net *net, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) bool retval = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (in_own_node(net, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) n = tipc_node_find(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) retval = node_is_up(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) addr ^= tipc_net(net)->random;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) while ((n = tipc_node_find(net, addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) addr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * Returns suggested address if any, otherwise 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct tipc_net *tn = tipc_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) bool preliminary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) u32 sugg_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /* Suggest new address if some other peer is using this one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) n = tipc_node_find(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (!memcmp(n->peer_id, id, NODE_ID_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return tipc_node_suggest_addr(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /* Suggest previously used address if peer is known */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) n = tipc_node_find_by_id(net, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) sugg_addr = n->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) preliminary = n->preliminary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (!preliminary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return sugg_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* Even this node may be in conflict */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (tn->trial_addr == addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return tipc_node_suggest_addr(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) void tipc_node_check_dest(struct net *net, u32 addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) u8 *peer_id, struct tipc_bearer *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) u16 capabilities, u32 signature, u32 hash_mixes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct tipc_media_addr *maddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) bool *respond, bool *dupl_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct tipc_link *l, *snd_l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct tipc_link_entry *le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) bool addr_match = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) bool sign_match = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) bool link_up = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) bool accept_addr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) bool reset = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) char *if_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) unsigned long intv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) u16 session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) *dupl_addr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) *respond = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) tipc_node_write_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (unlikely(!n->bc_entry.link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) snd_l = tipc_bc_sndlink(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (!tipc_link_bc_create(net, tipc_own_addr(net),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) addr, peer_id, U16_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) tipc_link_min_win(snd_l),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) tipc_link_max_win(snd_l),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) n->capabilities,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) &n->bc_entry.inputq1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) &n->bc_entry.namedq, snd_l,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) &n->bc_entry.link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) pr_warn("Broadcast rcv link creation failed, no mem\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) tipc_node_write_unlock_fast(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) le = &n->links[b->identity];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /* Prepare to validate requesting node's signature and media address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) l = le->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) link_up = l && tipc_link_is_up(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) sign_match = (signature == n->signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /* These three flags give us eight permutations: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (sign_match && addr_match && link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /* All is fine. Do nothing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) reset = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /* Peer node is not a container/local namespace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (!n->peer_hash_mix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) n->peer_hash_mix = hash_mixes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) } else if (sign_match && addr_match && !link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /* Respond. The link will come up in due time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) *respond = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) } else if (sign_match && !addr_match && link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* Peer has changed i/f address without rebooting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * If so, the link will reset soon, and the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * discovery will be accepted. So we can ignore it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * It may also be an cloned or malicious peer having
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * chosen the same node address and signature as an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) * existing one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * Ignore requests until the link goes down, if ever.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) *dupl_addr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) } else if (sign_match && !addr_match && !link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* Peer link has changed i/f address without rebooting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * It may also be a cloned or malicious peer; we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * distinguish between the two.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * The signature is correct, so we must accept.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) accept_addr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) *respond = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) } else if (!sign_match && addr_match && link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /* Peer node rebooted. Two possibilities:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * - Delayed re-discovery; this link endpoint has already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * reset and re-established contact with the peer, before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * receiving a discovery message from that node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * (The peer happened to receive one from this node first).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * - The peer came back so fast that our side has not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * discovered it yet. Probing from this side will soon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * reset the link, since there can be no working link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * endpoint at the peer end, and the link will re-establish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * Accept the signature, since it comes from a known peer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) n->signature = signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) } else if (!sign_match && addr_match && !link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* The peer node has rebooted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * Accept signature, since it is a known peer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) n->signature = signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) *respond = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) } else if (!sign_match && !addr_match && link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* Peer rebooted with new address, or a new/duplicate peer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * Ignore until the link goes down, if ever.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) *dupl_addr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) } else if (!sign_match && !addr_match && !link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /* Peer rebooted with new address, or it is a new peer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * Accept signature and address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) n->signature = signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) accept_addr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) *respond = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (!accept_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /* Now create new link if not already existing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (!l) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (n->link_cnt == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if_name = strchr(b->name, ':') + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) get_random_bytes(&session, sizeof(u16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) b->net_plane, b->mtu, b->priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) b->min_win, b->max_win, session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) tipc_own_addr(net), addr, peer_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) n->capabilities,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) tipc_bc_sndlink(n->net), n->bc_entry.link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) &le->inputq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) &n->bc_entry.namedq, &l)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) *respond = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) tipc_link_reset(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) tipc_link_fsm_evt(l, LINK_RESET_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (n->state == NODE_FAILINGOVER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) le->link = l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) n->link_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) tipc_node_calculate_timer(n, l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (n->link_cnt == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (!mod_timer(&n->timer, intv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) tipc_node_get(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) memcpy(&le->maddr, maddr, sizeof(*maddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) tipc_node_write_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (reset && l && !tipc_link_is_reset(l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) tipc_node_link_down(n, b->identity, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) void tipc_node_delete_links(struct net *net, int bearer_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct tipc_net *tn = net_generic(net, tipc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) list_for_each_entry_rcu(n, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) tipc_node_link_down(n, bearer_id, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) static void tipc_node_reset_links(struct tipc_node *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) pr_warn("Resetting all links to %x\n", n->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) trace_tipc_node_reset_links(n, true, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) for (i = 0; i < MAX_BEARERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) tipc_node_link_down(n, i, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /* tipc_node_fsm_evt - node finite state machine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * Determines when contact is allowed with peer node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) int state = n->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) case SELF_DOWN_PEER_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) switch (evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) case SELF_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) state = SELF_UP_PEER_COMING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) case PEER_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) state = SELF_COMING_PEER_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) case SELF_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) case PEER_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) case NODE_SYNCH_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) case NODE_SYNCH_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) case NODE_FAILOVER_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) case NODE_FAILOVER_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) goto illegal_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) case SELF_UP_PEER_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) switch (evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) case SELF_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) state = SELF_DOWN_PEER_LEAVING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) case PEER_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) state = SELF_LEAVING_PEER_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) case NODE_SYNCH_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) state = NODE_SYNCHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) case NODE_FAILOVER_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) state = NODE_FAILINGOVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) case SELF_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) case PEER_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) case NODE_SYNCH_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) case NODE_FAILOVER_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) goto illegal_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) case SELF_DOWN_PEER_LEAVING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) switch (evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) case PEER_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) state = SELF_DOWN_PEER_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) case SELF_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) case PEER_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) case SELF_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) case NODE_SYNCH_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) case NODE_SYNCH_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) case NODE_FAILOVER_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) case NODE_FAILOVER_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) goto illegal_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) case SELF_UP_PEER_COMING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) switch (evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) case PEER_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) state = SELF_UP_PEER_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) case SELF_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) state = SELF_DOWN_PEER_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) case SELF_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) case PEER_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) case NODE_SYNCH_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) case NODE_FAILOVER_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) case NODE_SYNCH_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) case NODE_FAILOVER_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) goto illegal_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) case SELF_COMING_PEER_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) switch (evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) case SELF_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) state = SELF_UP_PEER_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) case PEER_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) state = SELF_DOWN_PEER_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) case SELF_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) case PEER_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) case NODE_SYNCH_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) case NODE_SYNCH_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) case NODE_FAILOVER_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) case NODE_FAILOVER_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) goto illegal_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) case SELF_LEAVING_PEER_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) switch (evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) case SELF_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) state = SELF_DOWN_PEER_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) case SELF_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) case PEER_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) case PEER_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) case NODE_SYNCH_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) case NODE_SYNCH_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) case NODE_FAILOVER_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) case NODE_FAILOVER_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) goto illegal_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) case NODE_FAILINGOVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) switch (evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) case SELF_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) state = SELF_DOWN_PEER_LEAVING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) case PEER_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) state = SELF_LEAVING_PEER_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) case NODE_FAILOVER_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) state = SELF_UP_PEER_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) case NODE_FAILOVER_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) case SELF_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) case PEER_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) case NODE_SYNCH_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) case NODE_SYNCH_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) goto illegal_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) case NODE_SYNCHING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) switch (evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) case SELF_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) state = SELF_DOWN_PEER_LEAVING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) case PEER_LOST_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) state = SELF_LEAVING_PEER_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) case NODE_SYNCH_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) state = SELF_UP_PEER_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) case NODE_FAILOVER_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) state = NODE_FAILINGOVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) case NODE_SYNCH_BEGIN_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) case SELF_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) case PEER_ESTABL_CONTACT_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) case NODE_FAILOVER_END_EVT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) goto illegal_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) pr_err("Unknown node fsm state %x\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) n->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) illegal_evt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static void node_lost_contact(struct tipc_node *n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) struct sk_buff_head *inputq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct tipc_sock_conn *conn, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct tipc_link *l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct list_head *conns = &n->conn_sks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) uint i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) pr_debug("Lost contact with %x\n", n->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) trace_tipc_node_lost_contact(n, true, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) /* Clean up broadcast state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) tipc_bcast_remove_peer(n->net, n->bc_entry.link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) skb_queue_purge(&n->bc_entry.namedq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /* Abort any ongoing link failover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) for (i = 0; i < MAX_BEARERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) l = n->links[i].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /* Notify publications from this node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) n->peer_net = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) n->peer_hash_mix = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) /* Notify sockets connected to node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) list_for_each_entry_safe(conn, safe, conns, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) SHORT_H_SIZE, 0, tipc_own_addr(n->net),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) conn->peer_node, conn->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) conn->peer_port, TIPC_ERR_NO_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (likely(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) skb_queue_tail(inputq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) list_del(&conn->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) kfree(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * tipc_node_get_linkname - get the name of a link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * @bearer_id: id of the bearer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * @addr: peer node address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * @linkname: link name output buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * Returns 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) char *linkname, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct tipc_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct tipc_node *node = tipc_node_find(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (bearer_id >= MAX_BEARERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) tipc_node_read_lock(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) link = node->links[bearer_id].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) strncpy(linkname, tipc_link_name(link), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) tipc_node_read_unlock(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) tipc_node_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /* Caller should hold node lock for the passed node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) void *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) struct nlattr *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) NLM_F_MULTI, TIPC_NL_NODE_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (!attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) goto msg_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) goto attr_msg_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (node_is_up(node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) goto attr_msg_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) nla_nest_end(msg->skb, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) genlmsg_end(msg->skb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) attr_msg_full:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) nla_nest_cancel(msg->skb, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) msg_full:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) genlmsg_cancel(msg->skb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) struct tipc_msg *hdr = buf_msg(skb_peek(list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) struct sk_buff_head inputq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) switch (msg_user(hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) case TIPC_LOW_IMPORTANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) case TIPC_MEDIUM_IMPORTANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) case TIPC_HIGH_IMPORTANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) case TIPC_CRITICAL_IMPORTANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (msg_connected(hdr) || msg_named(hdr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) msg_direct(hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) tipc_loopback_trace(peer_net, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) spin_lock_init(&list->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) tipc_sk_rcv(peer_net, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (msg_mcast(hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) tipc_loopback_trace(peer_net, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) skb_queue_head_init(&inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) tipc_sk_mcast_rcv(peer_net, list, &inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) __skb_queue_purge(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) skb_queue_purge(&inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) case MSG_FRAGMENTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (tipc_msg_assemble(list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) tipc_loopback_trace(peer_net, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) skb_queue_head_init(&inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) tipc_sk_mcast_rcv(peer_net, list, &inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) __skb_queue_purge(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) skb_queue_purge(&inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) case GROUP_PROTOCOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) case CONN_MANAGER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) tipc_loopback_trace(peer_net, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) spin_lock_init(&list->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) tipc_sk_rcv(peer_net, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) case LINK_PROTOCOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) case NAME_DISTRIBUTOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) case TUNNEL_PROTOCOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) case BCAST_PROTOCOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * tipc_node_xmit() is the general link level function for message sending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) * @net: the applicable net namespace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) * @list: chain of buffers containing message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * @dnode: address of destination node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * @selector: a number used for deterministic link selection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * Consumes the buffer chain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) u32 dnode, int selector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct tipc_link_entry *le = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) struct sk_buff_head xmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) bool node_up = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) int bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (in_own_node(net, dnode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) tipc_loopback_trace(net, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) spin_lock_init(&list->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) tipc_sk_rcv(net, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) n = tipc_node_find(net, dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (unlikely(!n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) __skb_queue_purge(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) tipc_node_read_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) node_up = node_is_up(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (node_up && n->peer_net && check_net(n->peer_net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) /* xmit inner linux container */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) tipc_lxc_xmit(n->peer_net, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) if (likely(skb_queue_empty(list))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) tipc_node_read_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) bearer_id = n->active_links[selector & 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) if (unlikely(bearer_id == INVALID_BEARER_ID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) tipc_node_read_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) __skb_queue_purge(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) __skb_queue_head_init(&xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) le = &n->links[bearer_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) spin_lock_bh(&le->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) rc = tipc_link_xmit(le->link, list, &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) spin_unlock_bh(&le->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) tipc_node_read_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (unlikely(rc == -ENOBUFS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) tipc_node_link_down(n, bearer_id, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) /* tipc_node_xmit_skb(): send single buffer to destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) * messages, which will not be rejected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * The only exception is datagram messages rerouted after secondary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * lookup, which are rare and safe to dispose of anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) u32 selector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) struct sk_buff_head head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) __skb_queue_head_init(&head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) __skb_queue_tail(&head, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) tipc_node_xmit(net, &head, dnode, selector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) /* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) u32 selector, dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) while ((skb = __skb_dequeue(xmitq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) selector = msg_origport(buf_msg(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) dnode = msg_destnode(buf_msg(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) tipc_node_xmit_skb(net, skb, dnode, selector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct sk_buff_head xmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct sk_buff *txskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) u16 dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) u32 dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) /* Use broadcast if all nodes support it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) __skb_queue_head_init(&xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) __skb_queue_tail(&xmitq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) tipc_bcast_xmit(net, &xmitq, &dummy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /* Otherwise use legacy replicast method */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) list_for_each_entry_rcu(n, tipc_nodes(net), list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) dst = n->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (in_own_node(net, dst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (!node_is_up(n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) txskb = pskb_copy(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (!txskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) msg_set_destnode(buf_msg(txskb), dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) tipc_node_xmit_skb(net, txskb, dst, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) static void tipc_node_mcast_rcv(struct tipc_node *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) struct tipc_bclink_entry *be = &n->bc_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /* 'arrvq' is under inputq2's lock protection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) spin_lock_bh(&be->inputq2.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) spin_lock_bh(&be->inputq1.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) spin_unlock_bh(&be->inputq1.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) spin_unlock_bh(&be->inputq2.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) int bearer_id, struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) struct tipc_link *ucl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (rc & TIPC_LINK_DOWN_EVT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) tipc_node_reset_links(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (!(rc & TIPC_LINK_SND_STATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) /* If probe message, a STATE response will be sent anyway */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (msg_probe(hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /* Produce a STATE message carrying broadcast NACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) tipc_node_read_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) ucl = n->links[bearer_id].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (ucl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) tipc_link_build_state_msg(ucl, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) tipc_node_read_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) * @net: the applicable net namespace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) * @skb: TIPC packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) * @bearer_id: id of bearer message arrived on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * Invoked with no locks held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) struct sk_buff_head xmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct tipc_bclink_entry *be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) struct tipc_link_entry *le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct tipc_msg *hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) int usr = msg_user(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) u32 dnode = msg_destnode(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) __skb_queue_head_init(&xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /* If NACK for other node, let rcv link for that node peek into it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) n = tipc_node_find(net, dnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) n = tipc_node_find(net, msg_prevnode(hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (!n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) be = &n->bc_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) le = &n->links[bearer_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) rc = tipc_bcast_rcv(net, be->link, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) /* Broadcast ACKs are sent on a unicast link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (rc & TIPC_LINK_SND_STATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) tipc_node_read_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) tipc_link_build_state_msg(le->link, &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) tipc_node_read_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (!skb_queue_empty(&xmitq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) if (!skb_queue_empty(&be->inputq1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) tipc_node_mcast_rcv(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (!skb_queue_empty(&n->bc_entry.namedq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) tipc_named_rcv(net, &n->bc_entry.namedq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) &n->bc_entry.named_rcv_nxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) &n->bc_entry.named_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) /* If reassembly or retransmission failure => reset all links to peer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (rc & TIPC_LINK_DOWN_EVT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) tipc_node_reset_links(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * tipc_node_check_state - check and if necessary update node state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * @skb: TIPC packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * @bearer_id: identity of bearer delivering the packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * Returns true if state and msg are ok, otherwise false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) int bearer_id, struct sk_buff_head *xmitq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) struct tipc_msg *hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) int usr = msg_user(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) int mtyp = msg_type(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) u16 oseqno = msg_seqno(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) u16 exp_pkts = msg_msgcnt(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) int state = n->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) struct tipc_link *l, *tnl, *pl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) struct tipc_media_addr *maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) int pb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (trace_tipc_node_check_state_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) trace_tipc_skb_dump(skb, false, "skb for node state check");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) trace_tipc_node_check_state(n, true, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) l = n->links[bearer_id].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (!l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) rcv_nxt = tipc_link_rcv_nxt(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) /* Find parallel link, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if ((pb_id != bearer_id) && n->links[pb_id].link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) pl = n->links[pb_id].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (!tipc_link_validate_msg(l, hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) /* Check and update node accesibility if applicable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (state == SELF_UP_PEER_COMING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (!tipc_link_is_up(l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (!msg_peer_link_is_up(hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (state == SELF_DOWN_PEER_LEAVING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (msg_peer_node_is_up(hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (state == SELF_LEAVING_PEER_DOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) /* Ignore duplicate packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) /* Initiate or update failover mode if applicable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) syncpt = oseqno + exp_pkts - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (pl && !tipc_link_is_reset(pl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) trace_tipc_node_link_down(n, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) "node link down <- failover!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) tipc_link_inputq(l));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) /* If parallel link was already down, and this happened before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * the tunnel link came up, node failover was never started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) * Ensure that a FAILOVER_MSG is sent to get peer out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * NODE_FAILINGOVER state, also this node must accept
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * TUNNEL_MSGs from peer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (n->state != NODE_FAILINGOVER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) tipc_node_link_failover(n, pl, l, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* If pkts arrive out of order, use lowest calculated syncpt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (less(syncpt, n->sync_point))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) n->sync_point = syncpt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) /* Open parallel link when tunnel link reaches synch point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (!more(rcv_nxt, n->sync_point))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (pl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) /* No synching needed if only one link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (!pl || !tipc_link_is_up(pl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) /* Initiate synch mode if applicable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (n->capabilities & TIPC_TUNNEL_ENHANCED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) syncpt = msg_syncpt(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (!tipc_link_is_up(l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) __tipc_node_link_up(n, bearer_id, xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (n->state == SELF_UP_PEER_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) n->sync_point = syncpt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) /* Open tunnel link when parallel link reaches synch point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if (n->state == NODE_SYNCHING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (tipc_link_is_synching(l)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) tnl = l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) tnl = pl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) pl = l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) inputq_len = skb_queue_len(tipc_link_inputq(pl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (more(dlv_nxt, n->sync_point)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (l == pl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (usr == LINK_PROTOCOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) * tipc_rcv - process TIPC packets/messages arriving from off-node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) * @net: the applicable net namespace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) * @skb: TIPC packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) * @b: pointer to bearer message arrived on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) * Invoked with no locks held. Bearer pointer must point to a valid bearer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) * structure (i.e. cannot be NULL), but bearer can be inactive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct sk_buff_head xmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) struct tipc_link_entry *le;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) struct tipc_msg *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) int bearer_id = b->identity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) u32 self = tipc_own_addr(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) int usr, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) u16 bc_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) #ifdef CONFIG_TIPC_CRYPTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) struct tipc_ehdr *ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) /* Check if message must be decrypted first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) goto rcv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) ehdr = (struct tipc_ehdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) if (likely(ehdr->user != LINK_CONFIG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) n = tipc_node_find(net, ntohl(ehdr->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (unlikely(!n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) n = tipc_node_find_by_id(net, ehdr->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) rcv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) /* Ensure message is well-formed before touching the header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (unlikely(!tipc_msg_validate(&skb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) __skb_queue_head_init(&xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) usr = msg_user(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) bc_ack = msg_bcast_ack(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) /* Handle arrival of discovery or broadcast packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (unlikely(msg_non_seq(hdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (unlikely(usr == LINK_CONFIG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return tipc_disc_rcv(net, skb, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) return tipc_node_bc_rcv(net, skb, bearer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) /* Discard unicast link messages destined for another node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) /* Locate neighboring node that sent packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) n = tipc_node_find(net, msg_prevnode(hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (unlikely(!n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) le = &n->links[bearer_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) /* Ensure broadcast reception is in synch with peer's send state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (unlikely(usr == LINK_PROTOCOL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (unlikely(skb_linearize(skb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) hdr = buf_msg(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) } else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* Receive packet directly if conditions permit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) tipc_node_read_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) spin_lock_bh(&le->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) if (le->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) rc = tipc_link_rcv(le->link, skb, &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) spin_unlock_bh(&le->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) tipc_node_read_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) /* Check/update node state before receiving */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (unlikely(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (unlikely(skb_linearize(skb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) goto out_node_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) tipc_node_write_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (le->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) rc = tipc_link_rcv(le->link, skb, &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) tipc_node_write_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (unlikely(rc & TIPC_LINK_UP_EVT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) tipc_node_link_up(n, bearer_id, &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (unlikely(rc & TIPC_LINK_DOWN_EVT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) tipc_node_link_down(n, bearer_id, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) tipc_named_rcv(net, &n->bc_entry.namedq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) &n->bc_entry.named_rcv_nxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) &n->bc_entry.named_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) tipc_node_mcast_rcv(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if (!skb_queue_empty(&le->inputq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) tipc_sk_rcv(net, &le->inputq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (!skb_queue_empty(&xmitq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) out_node_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) discard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) int prop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) struct tipc_net *tn = tipc_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) int bearer_id = b->identity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) struct sk_buff_head xmitq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) struct tipc_link_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) __skb_queue_head_init(&xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) list_for_each_entry_rcu(n, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) tipc_node_write_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) e = &n->links[bearer_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (e->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (prop == TIPC_NLA_PROP_TOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) tipc_link_set_tolerance(e->link, b->tolerance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) &xmitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) else if (prop == TIPC_NLA_PROP_MTU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) tipc_link_set_mtu(e->link, b->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) /* Update MTU for node link entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) e->mtu = tipc_link_mss(e->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) tipc_node_write_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) struct tipc_net *tn = net_generic(net, tipc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) struct tipc_node *peer, *temp_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) /* We identify the peer by its net */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if (!info->attrs[TIPC_NLA_NET])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) info->attrs[TIPC_NLA_NET],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) tipc_nl_net_policy, info->extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (!attrs[TIPC_NLA_NET_ADDR])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (in_own_node(net, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) spin_lock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) peer = tipc_node_find(net, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (!peer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) spin_unlock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) tipc_node_write_lock(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) if (peer->state != SELF_DOWN_PEER_DOWN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) peer->state != SELF_DOWN_PEER_LEAVING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) tipc_node_write_unlock(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) tipc_node_clear_links(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) tipc_node_write_unlock(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) tipc_node_delete(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) /* Calculate cluster capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) tn->capabilities = TIPC_NODE_CAPABILITIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) tn->capabilities &= temp_node->capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) tipc_node_put(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) spin_unlock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
/* tipc_nl_node_dump - netlink dump callback emitting one message per known
 * cluster node. Resumable: progress is carried between invocations in
 * cb->args[0] (done flag) and cb->args[1] (address of the node whose message
 * did not fit into the previous skb).
 */
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];		/* non-zero once the whole list was sent */
	int last_addr = cb->args[1];	/* resume point; 0 on the first call */
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		/* Verify the resume node still exists before walking the list */
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);	/* drop the ref taken by tipc_node_find() */
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (node->preliminary)
			continue;
		if (last_addr) {
			/* Skip nodes already dumped in earlier calls; the one
			 * matching last_addr is retried (it did not fit last
			 * time).
			 */
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			/* skb is full: remember where to resume and stop */
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) /* tipc_node_find_by_name - locate owner node of link by link's name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) * @net: the applicable net namespace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) * @name: pointer to link name string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) * @bearer_id: pointer to index in 'node->links' array where the link was found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) * Returns pointer to node owning the link, or 0 if no matching link is found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) static struct tipc_node *tipc_node_find_by_name(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) const char *link_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) unsigned int *bearer_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) struct tipc_net *tn = net_generic(net, tipc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) struct tipc_link *l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) struct tipc_node *found_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) *bearer_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) list_for_each_entry_rcu(n, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) tipc_node_read_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) for (i = 0; i < MAX_BEARERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) l = n->links[i].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) if (l && !strcmp(tipc_link_name(l), link_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) *bearer_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) found_node = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) tipc_node_read_unlock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (found_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) return found_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
/* tipc_nl_node_set_link - netlink handler updating the properties (tolerance,
 * priority, window) of a named unicast link, or of the broadcast link when
 * the broadcast link name is given.
 */
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;	/* protocol msgs generated by the setters */
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* Broadcast link is handled by its own setter */
	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 max_win;

			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			/* Only the max window is settable; min stays as-is */
			tipc_link_set_queue_limits(link,
						   tipc_link_min_win(link),
						   max_win);
		}
	}

out:
	tipc_node_read_unlock(node);
	/* Flush any generated protocol messages after dropping the node lock;
	 * on the error paths xmitq is empty, so this is a no-op then.
	 */
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
			 NULL);
	return res;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
/* tipc_nl_node_get_link - netlink handler replying with the attributes of a
 * single named link (broadcast or unicast).
 */
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* Reply skb is allocated up-front; freed on any failure below */
	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		/* Hold the node read lock while the link attrs are copied */
		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	/* Ownership of msg.skb passes to genlmsg_reply() on success */
	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
/* tipc_nl_node_reset_link_stats - netlink handler zeroing the statistics of a
 * named link. Handles three cases: the broadcast send link, a per-node
 * broadcast receive link (name contains the bclink name), or a regular
 * unicast link.
 */
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* Default result if no matching broadcast receive link is found in
	 * the strstr() branch below.
	 */
	err = -EINVAL;
	if (!strcmp(link_name, tipc_bclink_name)) {
		/* Exact match: the broadcast send link */
		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
		if (err)
			return err;
		return 0;
	} else if (strstr(link_name, tipc_bclink_name)) {
		/* Partial match: search per-node broadcast receive links */
		rcu_read_lock();
		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			link = node->bc_entry.link;
			if (link && !strcmp(link_name, tipc_link_name(link))) {
				err = tipc_bclink_reset_stats(net, link);
				tipc_node_read_unlock(node);
				break;
			}
			tipc_node_read_unlock(node);
		}
		rcu_read_unlock();
		return err;
	}

	/* Regular unicast link */
	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	/* Take the link-entry spinlock as well: stats may be updated from
	 * contexts that hold only le->lock.
	 */
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /* Caller should hold node lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) struct tipc_node *node, u32 *prev_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) bool bc_link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) for (i = *prev_link; i < MAX_BEARERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) *prev_link = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (!node->links[i].link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) err = __tipc_nl_add_link(net, msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) node->links[i].link, NLM_F_MULTI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (bc_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) *prev_link = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) *prev_link = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
/* tipc_nl_node_dump_link - netlink dump callback listing all links in the
 * cluster. Resumable via cb->args: [0] last dumped node address, [1] bearer
 * index to resume at within that node, [2] done flag, [3] whether broadcast
 * receive links are included.
 */
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	bool bc_link = cb->args[3];
	int err;

	if (done)
		return 0;

	if (!prev_node) {
		/* Check if broadcast-receiver links dumping is needed */
		if (attrs && attrs[TIPC_NLA_LINK]) {
			err = nla_parse_nested_deprecated(link,
							  TIPC_NLA_LINK_MAX,
							  attrs[TIPC_NLA_LINK],
							  tipc_nl_link_policy,
							  NULL);
			if (unlikely(err))
				return err;
			if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
				return -EINVAL;
			bc_link = true;
		}
	}

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		/* Resuming: verify the last dumped node still exists */
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);	/* drop ref taken by tipc_node_find() */

		/* Continue the walk after the resume node */
		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		/* Fresh dump: broadcast send link first, then every node */
		err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	/* Persist resume state for the next invocation */
	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;
	cb->args[3] = bc_link;

	return skb->len;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) if (!info->attrs[TIPC_NLA_MON])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) info->attrs[TIPC_NLA_MON],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) tipc_nl_monitor_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) info->extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) err = tipc_nl_monitor_set_threshold(net, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) struct nlattr *attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) void *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 0, TIPC_NL_MON_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) if (!attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) goto msg_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) val = tipc_nl_monitor_get_threshold(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) goto attr_msg_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) nla_nest_end(msg->skb, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) genlmsg_end(msg->skb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) attr_msg_full:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) nla_nest_cancel(msg->skb, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) msg_full:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) genlmsg_cancel(msg->skb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) struct tipc_nl_msg msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) if (!msg.skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) msg.portid = info->snd_portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) msg.seq = info->snd_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) err = __tipc_nl_add_monitor_prop(net, &msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) nlmsg_free(msg.skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) return genlmsg_reply(msg.skb, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) u32 prev_bearer = cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) struct tipc_nl_msg msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) int bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) if (prev_bearer == MAX_BEARERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) msg.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) msg.portid = NETLINK_CB(cb->skb).portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) msg.seq = cb->nlh->nlmsg_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) err = __tipc_nl_add_monitor(net, &msg, bearer_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) cb->args[0] = bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) u32 prev_node = cb->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) u32 bearer_id = cb->args[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) int done = cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) struct tipc_nl_msg msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) if (!prev_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) if (!attrs[TIPC_NLA_MON])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) attrs[TIPC_NLA_MON],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) tipc_nl_monitor_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) if (!mon[TIPC_NLA_MON_REF])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) if (bearer_id >= MAX_BEARERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) msg.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) msg.portid = NETLINK_CB(cb->skb).portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) msg.seq = cb->nlh->nlmsg_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) cb->args[0] = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) cb->args[1] = prev_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) cb->args[2] = bearer_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) #ifdef CONFIG_TIPC_CRYPTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) static int tipc_nl_retrieve_key(struct nlattr **attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) struct tipc_aead_key **pkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) struct tipc_aead_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) if (!attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) if (nla_len(attr) < sizeof(*key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) key = (struct tipc_aead_key *)nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) nla_len(attr) < tipc_aead_key_size(key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) *pkey = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (!attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) if (nla_len(attr) < TIPC_NODEID_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) *node_id = (u8 *)nla_data(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) if (!attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) *intv = nla_get_u32(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) struct tipc_node *n = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) struct tipc_aead_key *ukey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) bool rekeying = true, master_key = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) u8 *id, *own_id, mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) u32 intv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) if (!info->attrs[TIPC_NLA_NODE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) info->attrs[TIPC_NLA_NODE],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) tipc_nl_node_policy, info->extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) own_id = tipc_own_id(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) if (!own_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) rc = tipc_nl_retrieve_rekeying(attrs, &intv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if (rc == -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) rekeying = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) rc = tipc_nl_retrieve_key(attrs, &ukey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (rc == -ENODATA && rekeying)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) goto rekeying;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) else if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) rc = tipc_aead_key_validate(ukey, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) rc = tipc_nl_retrieve_nodeid(attrs, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) switch (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) case -ENODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) mode = CLUSTER_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) mode = PER_NODE_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) if (memcmp(id, own_id, NODE_ID_LEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) n = tipc_node_find_by_id(net, id) ?:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) tipc_node_create(net, 0, id, 0xffffu, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) if (unlikely(!n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) c = n->crypto_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) /* Initiate the TX/RX key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) rc = tipc_crypto_key_init(c, ukey, mode, master_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) tipc_node_put(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) if (unlikely(rc < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) } else if (c == tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) /* Distribute TX key but not master one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) GENL_SET_ERR_MSG(info, "failed to replicate new key");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) rekeying:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) /* Schedule TX rekeying if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) tipc_crypto_rekeying_sched(tx, rekeying, intv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) err = __tipc_nl_node_set_key(skb, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) static int __tipc_nl_node_flush_key(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) struct tipc_net *tn = tipc_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) tipc_crypto_key_flush(tn->crypto_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) list_for_each_entry_rcu(n, &tn->node_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) tipc_crypto_key_flush(n->crypto_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) err = __tipc_nl_node_flush_key(skb, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) * tipc_node_dump - dump TIPC node data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) * @n: tipc node to be dumped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) * @more: dump more?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) * - false: dump only tipc node data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) * - true: dump node link data as well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) * @buf: returned buffer of dump data in format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) if (!n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) i += scnprintf(buf, sz, "node data: (null)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) i += scnprintf(buf, sz, "node data: %x", n->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) i += scnprintf(buf + i, sz - i, " %x", n->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) i += scnprintf(buf + i, sz - i, " %u", n->working_links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) if (!more)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) i += scnprintf(buf + i, sz - i, " media: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) i += scnprintf(buf + i, sz - i, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) i += scnprintf(buf + i, sz - i, " inputq: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) i += scnprintf(buf + i, sz - i, " media: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) i += scnprintf(buf + i, sz - i, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) i += scnprintf(buf + i, sz - i, " inputq: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) i += scnprintf(buf + i, sz - i, "bclink:\n ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) void tipc_node_pre_cleanup_net(struct net *exit_net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) struct tipc_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) struct tipc_net *tn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) struct net *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) for_each_net_rcu(tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) if (tmp == exit_net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) tn = tipc_net(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) if (!tn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) spin_lock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) list_for_each_entry_rcu(n, &tn->node_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) if (!n->peer_net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) if (n->peer_net != exit_net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) tipc_node_write_lock(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) n->peer_net = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) n->peer_hash_mix = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) tipc_node_write_unlock_fast(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) spin_unlock_bh(&tn->node_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) }