/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "name_distr.h"

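/* Tunable via the "named_timeout" sysctl; the value is presumed to be in ms */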
int sysctl_tipc_named_timeout __read_mostly = 2000;

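/* A name table update deferred on the tipc_net::dist_queue list */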
struct distr_queue_item {
	struct distr_item i;
	u32 dtype;
	u32 node;
	unsigned long expires;
	struct list_head next;
};

/**
 * publ_to_item - add publication info to a publication message
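 * @i: location of the item within the message payload
 * @p: the publication to copy from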
 */
static void publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type = htonl(p->type);
	i->lower = htonl(p->lower);
	i->upper = htonl(p->upper);
	i->port = htonl(p->port);
	i->key = htonl(p->key);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
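 * @net: the associated network namespace
 * @type: message type (PUBLICATION or WITHDRAWAL)
 * @size: payload size in bytes
 * @dest: destination node address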
 *
 * The buffer returned is of size INT_H_SIZE + payload size
 */
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
					 u32 dest)
{
	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	u32 self = tipc_own_addr(net);
	struct tipc_msg *msg;

	if (buf != NULL) {
		msg = buf_msg(buf);
		tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
			      type, INT_H_SIZE, dest);
		msg_set_size(msg, INT_H_SIZE + size);
	}
	return buf;
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
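 * @net: the associated network namespace
 * @publ: the new publication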
 */
struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	if (publ->scope == TIPC_NODE_SCOPE) {
		list_add_tail_rcu(&publ->binding_node, &nt->node_scope);
		return NULL;
	}
	write_lock_bh(&nt->cluster_scope_lock);
	list_add_tail(&publ->binding_node, &nt->cluster_scope);
	write_unlock_bh(&nt->cluster_scope_lock);
	skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Publication distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, publ);
	return skb;
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
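 * @net: the associated network namespace
 * @publ: the withdrawn publication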
 */
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	write_lock_bh(&nt->cluster_scope_lock);
	list_del(&publ->binding_node);
	write_unlock_bh(&nt->cluster_scope_lock);
	if (publ->scope == TIPC_NODE_SCOPE)
		return NULL;

	skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Withdrawal distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, publ);
	return skb;
}

/**
 * named_distribute - prepare name info for bulk distribution to another node
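 * @net: the associated network namespace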
 * @list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
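 * @seqno: sequence number to be set on the last message of the bulk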
 */
static void named_distribute(struct net *net, struct sk_buff_head *list,
			     u32 dnode, struct list_head *pls, u16 seqno)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct distr_item *item = NULL;
	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
			ITEM_SIZE) * ITEM_SIZE;
	u32 msg_rem = msg_dsz;
	struct tipc_msg *hdr;

	list_for_each_entry(publ, pls, binding_node) {
		/* Prepare next buffer: */
		if (!skb) {
			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
						dnode);
			if (!skb) {
				pr_warn("Bulk publication failure\n");
				return;
			}
			hdr = buf_msg(skb);
			msg_set_bc_ack_invalid(hdr, true);
			msg_set_bulk(hdr);
			msg_set_non_legacy(hdr);
			item = (struct distr_item *)msg_data(hdr);
		}

		/* Pack publication into message: */
		publ_to_item(item, publ);
		item++;
		msg_rem -= ITEM_SIZE;

		/* Append full buffer to list: */
		if (!msg_rem) {
			__skb_queue_tail(list, skb);
			skb = NULL;
			msg_rem = msg_dsz;
		}
	}
	if (skb) {
		hdr = buf_msg(skb);
		msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
		skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
		__skb_queue_tail(list, skb);
	}
	hdr = buf_msg(skb_peek_tail(list));
	msg_set_last_bulk(hdr);
	msg_set_named_seqno(hdr, seqno);
}

/**
 * tipc_named_node_up - tell specified node about all publications by this node
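 * @net: the associated network namespace
 * @dnode: destination node address
 * @capabilities: peer node's capability bits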
 */
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct sk_buff_head head;
	u16 seqno;

	__skb_queue_head_init(&head);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests++;
	seqno = nt->snd_nxt;
	spin_unlock_bh(&tn->nametbl_lock);

	read_lock_bh(&nt->cluster_scope_lock);
	named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
	tipc_node_xmit(net, &head, dnode, 0);
	read_unlock_bh(&nt->cluster_scope_lock);
}

/**
 * tipc_publ_purge - remove publication associated with a failed node
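 * @net: the associated network namespace
 * @publ: the publication to remove
 * @addr: network address of the failed node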
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 */
static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct publication *p;

	spin_lock_bh(&tn->nametbl_lock);
	p = tipc_nametbl_remove_publ(net, publ->type, publ->lower, publ->upper,
				     publ->node, publ->key);
	if (p)
		tipc_node_unsubscribe(net, &p->binding_node, addr);
	spin_unlock_bh(&tn->nametbl_lock);

	if (p != publ) {
		pr_err("Unable to remove publication from failed node\n"
		       " (type=%u, lower=%u, node=0x%x, port=%u, key=%u)\n",
		       publ->type, publ->lower, publ->node, publ->port,
		       publ->key);
	}

	if (p)
		kfree_rcu(p, rcu);
}

/**
 * tipc_dist_queue_purge - remove deferred updates from a node that went down
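 * @net: the associated network namespace
 * @addr: network address of the node that went down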
 */
static void tipc_dist_queue_purge(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct distr_queue_item *e, *tmp;

	spin_lock_bh(&tn->nametbl_lock);
	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
		if (e->node != addr)
			continue;
		list_del(&e->next);
		kfree(e);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
		      u32 addr, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *publ, *tmp;

	list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
		tipc_publ_purge(net, publ, addr);
	tipc_dist_queue_purge(net, addr);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests--;
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_update_nametbl - try to process a nametable update and notify
 *                       subscribers
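 * @net: the associated network namespace
 * @i: location of the item within the received message
 * @node: originating node address
 * @dtype: name distributor message type (PUBLICATION or WITHDRAWAL)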
 *
 * tipc_nametbl_lock must be held.
 * Return: true if the update was successfully applied, otherwise false.
 */
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
				u32 node, u32 dtype)
{
	struct publication *p = NULL;
	u32 lower = ntohl(i->lower);
	u32 upper = ntohl(i->upper);
	u32 type = ntohl(i->type);
	u32 port = ntohl(i->port);
	u32 key = ntohl(i->key);

	if (dtype == PUBLICATION) {
		p = tipc_nametbl_insert_publ(net, type, lower, upper,
					     TIPC_CLUSTER_SCOPE, node,
					     port, key);
		if (p) {
			tipc_node_subscribe(net, &p->binding_node, node);
			return true;
		}
	} else if (dtype == WITHDRAWAL) {
		p = tipc_nametbl_remove_publ(net, type, lower,
					     upper, node, key);
		if (p) {
			tipc_node_unsubscribe(net, &p->binding_node, node);
			kfree_rcu(p, rcu);
			return true;
		}
		pr_warn_ratelimited("Failed to remove binding %u,%u from %x\n",
				    type, lower, node);
	} else {
		pr_warn_ratelimited("Unknown name table message received\n");
	}
	return false;
}

static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
					  u16 *rcv_nxt, bool *open)
{
	struct sk_buff *skb, *tmp;
	struct tipc_msg *hdr;
	u16 seqno;

	spin_lock_bh(&namedq->lock);
	skb_queue_walk_safe(namedq, skb, tmp) {
		if (unlikely(skb_linearize(skb))) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
		hdr = buf_msg(skb);
		seqno = msg_named_seqno(hdr);
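		/* A "last bulk" message closes the initial bulk exchange:
		 * remember its seqno and start accepting in-sequence
		 * incremental updates from this peer.
		 */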
		if (msg_is_last_bulk(hdr)) {
			*rcv_nxt = seqno;
			*open = true;
		}

		if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		if (*open && (*rcv_nxt == seqno)) {
			(*rcv_nxt)++;
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

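		/* Discard duplicates and obsolete messages that lie behind
		 * the expected sequence number.
		 */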
		if (less(seqno, *rcv_nxt)) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
	}
	spin_unlock_bh(&namedq->lock);
	return NULL;
}

/**
 * tipc_named_rcv - process name table update messages sent by another node
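 * @net: the associated network namespace
 * @namedq: queue of name distributor messages from the peer node
 * @rcv_nxt: next expected sequence number from this peer, updated here
 * @open: whether in-sequence delivery has started (set once the bulk
 *        exchange completes), updated here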
 */
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
		    u16 *rcv_nxt, bool *open)
{
	struct tipc_net *tn = tipc_net(net);
	struct distr_item *item;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u32 count, node;

	spin_lock_bh(&tn->nametbl_lock);
	while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
		hdr = buf_msg(skb);
		node = msg_orignode(hdr);
		item = (struct distr_item *)msg_data(hdr);
		count = msg_data_sz(hdr) / ITEM_SIZE;
		while (count--) {
			tipc_update_nametbl(net, item, node, msg_type(hdr));
			item++;
		}
		kfree_skb(skb);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_named_reinit - re-initialize local publications
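 * @net: the associated network namespace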
 *
 * This routine is called whenever TIPC networking is enabled.
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */
void tipc_named_reinit(struct net *net)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *publ;
	u32 self = tipc_own_addr(net);

	spin_lock_bh(&tn->nametbl_lock);

	list_for_each_entry_rcu(publ, &nt->node_scope, binding_node)
		publ->node = self;
	list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node)
		publ->node = self;
	nt->rc_dests = 0;
	spin_unlock_bh(&tn->nametbl_lock);
}