Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3) *******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) **  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) *******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) ******************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * lowcomms.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * This is the "low-level" comms layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * It is responsible for sending/receiving messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * from other nodes in the cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * Cluster nodes are referred to by their nodeids. nodeids are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * simply 32 bit numbers to the locking module - if they need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  * be expanded for the cluster infrastructure then that is its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  * responsibility. It is this layer's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  * responsibility to resolve these into IP address or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  * whatever it needs for inter-node communication.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  * The comms level is two kernel threads that deal mainly with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * the receiving of messages from other nodes and passing them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  * up to the mid-level comms layer (which understands the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * message format) for execution by the locking core, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  * a send thread which does all the setting up of connections
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  * to remote nodes and the sending of data. Threads are not allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * to send their own data because it may cause them to wait in times
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  * of high load. Also, this way, the sending thread can collect together
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  * messages bound for one node and send them in one block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  * lowcomms will choose to use either TCP or SCTP as its transport layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  * depending on the configuration variable 'protocol'. This should be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  * cluster-wide mechanism as it must be the same on all nodes of the cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  * for the DLM to function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include <asm/ioctls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <net/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/sctp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <net/sctp/sctp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #include <net/ipv6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #include "dlm_internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #include "lowcomms.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #include "midcomms.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #include "config.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #define NEEDED_RMEM (4*1024*1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #define CONN_HASH_SIZE 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) /* Number of messages to send before rescheduling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) #define MAX_SEND_MSG_COUNT 25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(10000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 
/*
 * Per-remote-node connection state.  One instance exists per nodeid
 * (the listening connection uses nodeid 0); an extra instance may be
 * chained on via 'othercon' for the simultaneous-connect case.
 */
struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;	/* CF_* below are bit numbers for the
				 * atomic bitops (bits 0 and 3 unused) */
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
#define CF_APP_LIMITED 7
#define CF_CLOSING 8
#define CF_SHUTDOWN 9
	struct list_head writequeue;  /* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	void (*connect_action) (struct connection *);	/* What to do to connect */
	void (*shutdown_action)(struct connection *con); /* What to do to shutdown */
	int retries;		/* connect attempts so far */
#define MAX_CONNECT_RETRIES 3
	struct hlist_node list;	/* link in connection_hash[] bucket */
	struct connection *othercon; /* second connection from a simultaneous connect */
	struct work_struct rwork; /* Receive workqueue */
	struct work_struct swork; /* Send workqueue */
	wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
	unsigned char *rx_buf;	/* receive staging buffer (dlm_config sized) */
	int rx_buflen;		/* allocated size of rx_buf */
	int rx_leftover;	/* NOTE(review): presumably bytes of a partial
				 * message carried over between receives --
				 * confirm in the receive path (outside chunk) */
	struct rcu_head rcu;	/* deferred free under SRCU */
};
/* Fetch the connection attached to a socket via sk_user_data */
#define sock2con(x) ((struct connection *)(x)->sk_user_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
/* An entry waiting to be sent: one page of outgoing message data */
struct writequeue_entry {
	struct list_head list;	/* link in connection->writequeue */
	struct page *page;	/* page holding the message bytes */
	int offset;		/* NOTE(review): presumably start of unsent
				 * data within the page -- confirm in the
				 * send path (outside this chunk) */
	int len;		/* bytes queued but not yet sent */
	int end;		/* append position for the next message */
	int users;		/* writers still composing into this page */
	struct connection *con;	/* owning connection */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 
/*
 * Per-node list of configured comms addresses.  Linked on
 * dlm_node_addrs and protected by dlm_node_addrs_spin.
 */
struct dlm_node_addr {
	struct list_head list;
	int nodeid;
	int addr_count;		/* number of valid entries in addr[] */
	int curr_addr_index;	/* round-robin cursor advanced by nodeid_to_addr() */
	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 
/* Saved sk_* callbacks of the listening socket.
 * NOTE(review): presumably stashed so the originals can be restored
 * when lowcomms releases the socket -- the save/restore code is
 * outside this chunk, confirm there.
 */
static struct listen_sock_callbacks {
	void (*sk_error_report)(struct sock *);
	void (*sk_data_ready)(struct sock *);
	void (*sk_state_change)(struct sock *);
	void (*sk_write_space)(struct sock *);
} listen_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) static LIST_HEAD(dlm_node_addrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) static DEFINE_SPINLOCK(dlm_node_addrs_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) static int dlm_local_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) static int dlm_allow_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) /* Work queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) static struct workqueue_struct *recv_workqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) static struct workqueue_struct *send_workqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) static struct hlist_head connection_hash[CONN_HASH_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) static DEFINE_SPINLOCK(connections_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) DEFINE_STATIC_SRCU(connections_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) static void process_recv_sockets(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) static void process_send_sockets(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
/* This is deliberately very simple because most clusters have simple
   sequential nodeids, so we should be able to go straight to a connection
   struct in the array */
static inline int nodeid_hash(int nodeid)
{
	/* CONN_HASH_SIZE is a power of two, so the mask is nodeid mod size */
	return nodeid & (CONN_HASH_SIZE-1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
/*
 * Look up an existing connection for @nodeid in connection_hash.
 * Traversal is protected by connections_srcu; returns NULL when no
 * connection exists.
 * NOTE(review): the pointer is returned after srcu_read_unlock(), so
 * callers rely on connections not being freed while still reachable
 * -- confirm against the teardown path (outside this chunk).
 */
static struct connection *__find_con(int nodeid)
{
	int r, idx;
	struct connection *con;

	r = nodeid_hash(nodeid);

	idx = srcu_read_lock(&connections_srcu);
	hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid) {
			srcu_read_unlock(&connections_srcu, idx);
			return con;
		}
	}
	srcu_read_unlock(&connections_srcu, idx);

	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
/*
 * Return the connection for @nodeid, creating and hashing a new one
 * if none exists yet.  If @alloc is zero (no GFP flags given) we
 * don't attempt to create a new connection structure and just return
 * whatever __find_con() found (possibly NULL).
 */
static struct connection *nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con, *tmp;
	int r;

	con = __find_con(nodeid);
	if (con || !alloc)
		return con;

	con = kzalloc(sizeof(*con), alloc);
	if (!con)
		return NULL;

	/* receive staging buffer, sized from the cluster configuration */
	con->rx_buflen = dlm_config.ci_buffer_size;
	con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS);
	if (!con->rx_buf) {
		kfree(con);
		return NULL;
	}

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);
	init_waitqueue_head(&con->shutdown_wait);

	/* Setup action pointers for child sockets: inherit the transport
	 * callbacks from the listening connection (nodeid 0).
	 * NOTE(review): assumes the nodeid 0 connection already exists
	 * whenever a nonzero nodeid is created (zerocon is dereferenced
	 * unchecked) -- confirm against the init order of callers.
	 */
	if (con->nodeid) {
		struct connection *zerocon = __find_con(0);

		con->connect_action = zerocon->connect_action;
		if (!con->rx_action)
			con->rx_action = zerocon->rx_action;
	}

	r = nodeid_hash(nodeid);

	spin_lock(&connections_lock);
	/* Because multiple workqueues/threads calls this function it can
	 * race on multiple cpu's. Instead of locking hot path __find_con()
	 * we just check in rare cases of recently added nodes again
	 * under protection of connections_lock. If this is the case we
	 * abort our connection creation and return the existing connection.
	 */
	tmp = __find_con(nodeid);
	if (tmp) {
		spin_unlock(&connections_lock);
		kfree(con->rx_buf);
		kfree(con);
		return tmp;
	}

	hlist_add_head_rcu(&con->list, &connection_hash[r]);
	spin_unlock(&connections_lock);

	return con;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 
/* Loop round all connections: invoke @conn_func on every connection
 * in every hash bucket, under the connections_srcu read lock.
 */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
	int i, idx;
	struct connection *con;

	idx = srcu_read_lock(&connections_srcu);
	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(con, &connection_hash[i], list)
			conn_func(con);
	}
	srcu_read_unlock(&connections_srcu, idx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) static struct dlm_node_addr *find_node_addr(int nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	struct dlm_node_addr *na;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	list_for_each_entry(na, &dlm_node_addrs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 		if (na->nodeid == nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 			return na;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	switch (x->ss_family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	case AF_INET: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		struct sockaddr_in *siny = (struct sockaddr_in *)y;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 		if (sinx->sin_port != siny->sin_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	case AF_INET6: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 		if (sinx->sin6_port != siny->sin6_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 
/*
 * Resolve @nodeid to one of its configured addresses.
 *
 * Copies the node's current address into @sas_out (full
 * sockaddr_storage) and/or just the IP address part into @sa_out
 * (the port there is left untouched).  When @try_new_addr is set the
 * per-node address index is advanced round-robin, so subsequent
 * calls cycle through the node's addresses.
 *
 * Returns 0 on success, -1 if no local addresses are configured,
 * -EEXIST if the node is unknown (this function's historical errno
 * choice) and -ENOENT if the node has no addresses.
 */
static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
			  struct sockaddr *sa_out, bool try_new_addr)
{
	struct sockaddr_storage sas;
	struct dlm_node_addr *na;

	if (!dlm_local_count)
		return -1;

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na && na->addr_count) {
		/* snapshot the current address while the lock is held */
		memcpy(&sas, na->addr[na->curr_addr_index],
		       sizeof(struct sockaddr_storage));

		if (try_new_addr) {
			/* rotate to the next address, wrapping around */
			na->curr_addr_index++;
			if (na->curr_addr_index == na->addr_count)
				na->curr_addr_index = 0;
		}
	}
	spin_unlock(&dlm_node_addrs_spin);

	if (!na)
		return -EEXIST;

	if (!na->addr_count)
		return -ENOENT;

	if (sas_out)
		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

	if (!sa_out)
		return 0;

	/* copy only the IP address; the family is decided by the first
	 * local address, not by the peer's entry
	 */
	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4  = (struct sockaddr_in *) &sas;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &sas;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
		ret6->sin6_addr = in6->sin6_addr;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	struct dlm_node_addr *na;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	int rv = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	int addr_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	spin_lock(&dlm_node_addrs_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	list_for_each_entry(na, &dlm_node_addrs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 		if (!na->addr_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 			if (addr_compare(na->addr[addr_i], addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 				*nodeid = na->nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 				rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 				goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 	spin_unlock(&dlm_node_addrs_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 
/*
 * Register an additional comms address @addr (of @len bytes) for
 * @nodeid.
 *
 * Both the node entry and the address copy are allocated up front:
 * dlm_node_addrs_spin is a spinlock, so no sleeping allocation may
 * happen while it is held.  Whichever allocation turns out to be
 * unneeded is freed again afterwards.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENOSPC
 * when the node already has DLM_MAX_ADDR_COUNT addresses.
 */
int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
{
	struct sockaddr_storage *new_addr;
	struct dlm_node_addr *new_node, *na;

	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
	if (!new_node)
		return -ENOMEM;

	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
	if (!new_addr) {
		kfree(new_node);
		return -ENOMEM;
	}

	memcpy(new_addr, addr, len);

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (!na) {
		/* first address for this node: install the new entry */
		new_node->nodeid = nodeid;
		new_node->addr[0] = new_addr;
		new_node->addr_count = 1;
		list_add(&new_node->list, &dlm_node_addrs);
		spin_unlock(&dlm_node_addrs_spin);
		return 0;
	}

	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
		spin_unlock(&dlm_node_addrs_spin);
		kfree(new_addr);
		kfree(new_node);
		return -ENOSPC;
	}

	/* node already known: append the address, drop the spare entry */
	na->addr[na->addr_count++] = new_addr;
	spin_unlock(&dlm_node_addrs_spin);
	kfree(new_node);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
	struct connection *con;

	/* sk_callback_lock guards sk_user_data against the socket
	 * being torn down underneath us
	 */
	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	/* queue receive work only once until the worker clears the bit */
	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	read_unlock_bh(&sk->sk_callback_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 
/* Socket has buffer space again: clear the no-space flags and
 * reschedule the send worker.
 */
static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (!con)
		goto out;

	/* NOTE(review): con->sock is dereferenced without a NULL check;
	 * presumably sk_user_data is cleared before con->sock -- confirm
	 * against the close path (outside this chunk).
	 */
	clear_bit(SOCK_NOSPACE, &con->sock->flags);

	/* undo the pending-write accounting done when the send path
	 * hit a full socket and set CF_APP_LIMITED
	 */
	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
		con->sock->sk->sk_write_pending--;
		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
	}

	queue_work(send_workqueue, &con->swork);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) static inline void lowcomms_connect_sock(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	if (test_bit(CF_CLOSE, &con->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	queue_work(send_workqueue, &con->swork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) static void lowcomms_state_change(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	/* SCTP layer is not calling sk_data_ready when the connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	 * is done, so we catch the signal through here. Also, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	 * doesn't switch socket state when entering shutdown, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	 * skip the write in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	if (sk->sk_shutdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		if (sk->sk_shutdown == RCV_SHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			lowcomms_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	} else if (sk->sk_state == TCP_ESTABLISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		lowcomms_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) int dlm_lowcomms_connect_node(int nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	struct connection *con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	if (nodeid == dlm_our_nodeid())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	con = nodeid2con(nodeid, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	if (!con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	lowcomms_connect_sock(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) static void lowcomms_error_report(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	struct connection *con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	void (*orig_report)(struct sock *) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	struct inet_sock *inet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	con = sock2con(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	if (con == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	orig_report = listen_sock.sk_error_report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	inet = inet_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	switch (sk->sk_family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 				   "sending to node %d at %pI4, dport %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 				   "sk_err=%d/%d\n", dlm_our_nodeid(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 				   con->nodeid, &inet->inet_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 				   ntohs(inet->inet_dport), sk->sk_err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 				   sk->sk_err_soft);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 				   "sending to node %d at %pI6c, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 				   "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 				   con->nodeid, &sk->sk_v6_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 				   ntohs(inet->inet_dport), sk->sk_err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 				   sk->sk_err_soft);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 				   "invalid socket family %d set, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 				   "sk_err=%d/%d\n", dlm_our_nodeid(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 				   sk->sk_family, sk->sk_err, sk->sk_err_soft);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	if (orig_report)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		orig_report(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) /* Note: sk_callback_lock must be locked before calling this function. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) static void save_listen_callbacks(struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	listen_sock.sk_data_ready = sk->sk_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	listen_sock.sk_state_change = sk->sk_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	listen_sock.sk_write_space = sk->sk_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	listen_sock.sk_error_report = sk->sk_error_report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) static void restore_callbacks(struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	write_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	sk->sk_user_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	sk->sk_data_ready = listen_sock.sk_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	sk->sk_state_change = listen_sock.sk_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	sk->sk_write_space = listen_sock.sk_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	sk->sk_error_report = listen_sock.sk_error_report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	write_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) /* Make a socket active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) static void add_sock(struct socket *sock, struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	write_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	con->sock = sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	sk->sk_user_data = con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	/* Install a data_ready callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	sk->sk_data_ready = lowcomms_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	sk->sk_write_space = lowcomms_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	sk->sk_state_change = lowcomms_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	sk->sk_allocation = GFP_NOFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	sk->sk_error_report = lowcomms_error_report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	write_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) /* Add the port number to an IPv6 or 4 sockaddr and return the address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560)    length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			  int *addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	saddr->ss_family =  dlm_local_addr[0]->ss_family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	if (saddr->ss_family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		in4_addr->sin_port = cpu_to_be16(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		*addr_len = sizeof(struct sockaddr_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		in6_addr->sin6_port = cpu_to_be16(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		*addr_len = sizeof(struct sockaddr_in6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) /* Close a remote connection and tidy up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) static void close_connection(struct connection *con, bool and_other,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 			     bool tx, bool rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	if (tx && !closing && cancel_work_sync(&con->swork)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		log_print("canceled swork for node %d", con->nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		clear_bit(CF_WRITE_PENDING, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	if (rx && !closing && cancel_work_sync(&con->rwork)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		log_print("canceled rwork for node %d", con->nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		clear_bit(CF_READ_PENDING, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	mutex_lock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	if (con->sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		restore_callbacks(con->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		sock_release(con->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		con->sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	if (con->othercon && and_other) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		/* Will only re-enter once. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		close_connection(con->othercon, false, tx, rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	con->rx_leftover = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	con->retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	clear_bit(CF_CLOSING, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) static void shutdown_connection(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	flush_work(&con->swork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	mutex_lock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	/* nothing to shutdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	if (!con->sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	set_bit(CF_SHUTDOWN, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	ret = kernel_sock_shutdown(con->sock, SHUT_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		log_print("Connection %p failed to shutdown: %d will force close",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 			  con, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		goto force_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		ret = wait_event_timeout(con->shutdown_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 					 !test_bit(CF_SHUTDOWN, &con->flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 					 DLM_SHUTDOWN_WAIT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 			log_print("Connection %p shutdown timed out, will force close",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 				  con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 			goto force_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) force_close:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	clear_bit(CF_SHUTDOWN, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	close_connection(con, false, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) static void dlm_tcp_shutdown(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	if (con->othercon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		shutdown_connection(con->othercon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	shutdown_connection(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) static int con_realloc_receive_buf(struct connection *con, int newlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	unsigned char *newbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	newbuf = kmalloc(newlen, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	if (!newbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	/* copy any leftover from last receive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	if (con->rx_leftover)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		memmove(newbuf, con->rx_buf, con->rx_leftover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	/* swap to new buffer space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	kfree(con->rx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	con->rx_buflen = newlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	con->rx_buf = newbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) /* Data received from remote end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) static int receive_from_sock(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	int call_again_soon = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	struct msghdr msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	struct kvec iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	int ret, buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	mutex_lock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	if (con->sock == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		goto out_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	if (con->nodeid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		goto out_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	/* realloc if we get new buffer size to read out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	buflen = dlm_config.ci_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	if (con->rx_buflen != buflen && con->rx_leftover <= buflen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		ret = con_realloc_receive_buf(con, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 			goto out_resched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	/* calculate new buffer parameter regarding last receive and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	 * possible leftover bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	iov.iov_base = con->rx_buf + con->rx_leftover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	iov.iov_len = con->rx_buflen - con->rx_leftover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	memset(&msg, 0, sizeof(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			     msg.msg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		goto out_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	else if (ret == iov.iov_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		call_again_soon = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	/* new buflen according readed bytes and leftover from last receive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	buflen = ret + con->rx_leftover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		goto out_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	/* calculate leftover bytes from process and put it into begin of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	 * the receive buffer, so next receive we have the full message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	 * at the start address of the receive buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	con->rx_leftover = buflen - ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	if (con->rx_leftover) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		memmove(con->rx_buf, con->rx_buf + ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 			con->rx_leftover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		call_again_soon = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	if (call_again_soon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		goto out_resched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) out_resched:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		queue_work(recv_workqueue, &con->rwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) out_close:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (ret != -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		/* Reconnect when there is something to send */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		close_connection(con, false, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			log_print("connection %p got EOF from %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 				  con, con->nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 			/* handling for tcp shutdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			clear_bit(CF_SHUTDOWN, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 			wake_up(&con->shutdown_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 			/* signal to breaking receive worker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 			ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) /* Listening socket is busy, accept a connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) static int accept_from_sock(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	struct sockaddr_storage peeraddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	struct socket *newsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	int nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	struct connection *newcon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	struct connection *addcon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	unsigned int mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (!dlm_allow_conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	mutex_lock_nested(&con->sock_mutex, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	if (!con->sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	if (result < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		goto accept_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	/* Get the connected socket's peer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	memset(&peeraddr, 0, sizeof(peeraddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (len < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		result = -ECONNABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		goto accept_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	/* Get the new node's NODEID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	make_sockaddr(&peeraddr, 0, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (addr_to_nodeid(&peeraddr, &nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		unsigned char *b=(unsigned char *)&peeraddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		log_print("connect from non cluster node");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 				     b, sizeof(struct sockaddr_storage));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		sock_release(newsock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	dlm_comm_mark(nodeid, &mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	sock_set_mark(newsock->sk, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	log_print("got connection from %d", nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	/*  Check to see if we already have a connection to this node. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	 *  could happen if the two nodes initiate a connection at roughly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	 *  the same time and the connections cross on the wire.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	 *  In this case we store the incoming one in "othercon"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	newcon = nodeid2con(nodeid, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (!newcon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		result = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		goto accept_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	mutex_lock_nested(&newcon->sock_mutex, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	if (newcon->sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		struct connection *othercon = newcon->othercon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		if (!othercon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			if (!othercon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 				log_print("failed to allocate incoming socket");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 				mutex_unlock(&newcon->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 				result = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 				goto accept_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			othercon->rx_buflen = dlm_config.ci_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 			othercon->rx_buf = kmalloc(othercon->rx_buflen, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 			if (!othercon->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 				mutex_unlock(&newcon->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 				kfree(othercon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 				log_print("failed to allocate incoming socket receive buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 				result = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 				goto accept_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			othercon->nodeid = nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			othercon->rx_action = receive_from_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			mutex_init(&othercon->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			INIT_LIST_HEAD(&othercon->writequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			spin_lock_init(&othercon->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			INIT_WORK(&othercon->swork, process_send_sockets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			INIT_WORK(&othercon->rwork, process_recv_sockets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 			init_waitqueue_head(&othercon->shutdown_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			set_bit(CF_IS_OTHERCON, &othercon->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 			/* close other sock con if we have something new */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			close_connection(othercon, false, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		mutex_lock_nested(&othercon->sock_mutex, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		newcon->othercon = othercon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		add_sock(newsock, othercon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		addcon = othercon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		mutex_unlock(&othercon->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		newcon->rx_action = receive_from_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		/* accept copies the sk after we've saved the callbacks, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		   don't want to save them a second time or comm errors will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		   result in calling sk_error_report recursively. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		add_sock(newsock, newcon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		addcon = newcon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	mutex_unlock(&newcon->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	 * Add it to the active queue in case we got data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	 * between processing the accept adding the socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	 * to the read_sockets list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		queue_work(recv_workqueue, &addcon->rwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) accept_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	if (newsock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		sock_release(newsock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	if (result != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		log_print("error accepting connection from node: %d", result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) static void free_entry(struct writequeue_entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	__free_page(e->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	kfree(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  * writequeue_entry_complete - try to delete and free write queue entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  * @e: write queue entry to try to delete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  * @completed: bytes completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913)  * writequeue_lock must be held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	e->offset += completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	e->len -= completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	if (e->len == 0 && e->users == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		list_del(&e->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		free_entry(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  * sctp_bind_addrs - bind a SCTP socket to all our addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) static int sctp_bind_addrs(struct connection *con, uint16_t port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	struct sockaddr_storage localaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	struct sockaddr *addr = (struct sockaddr *)&localaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	int i, addr_len, result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	for (i = 0; i < dlm_local_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		make_sockaddr(&localaddr, port, &addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			result = kernel_bind(con->sock, addr, addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			result = sock_bind_add(con->sock->sk, addr, addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			log_print("Can't bind to %d addr number %d, %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 				  port, i + 1, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) /* Initiate an SCTP association.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)    This is a special case of send_to_sock() in that we don't yet have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)    peeled-off socket for this association, so we use the listening socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)    and add the primary IP address of the remote node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) static void sctp_connect_to_sock(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	struct sockaddr_storage daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	int addr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	struct socket *sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	unsigned int mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (con->nodeid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		log_print("attempt to connect sock 0 foiled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	dlm_comm_mark(con->nodeid, &mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	mutex_lock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	/* Some odd races can cause double-connects, ignore them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	if (con->retries++ > MAX_CONNECT_RETRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	if (con->sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		log_print("node %d already connected.", con->nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	memset(&daddr, 0, sizeof(daddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		log_print("no address for nodeid %d", con->nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	/* Create a socket to communicate with */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 				  SOCK_STREAM, IPPROTO_SCTP, &sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (result < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		goto socket_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	sock_set_mark(sock->sk, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	con->rx_action = receive_from_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	con->connect_action = sctp_connect_to_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	add_sock(sock, con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	/* Bind to all addresses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	if (sctp_bind_addrs(con, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		goto bind_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	log_print("connecting to %d", con->nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	/* Turn off Nagle's algorithm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	sctp_sock_set_nodelay(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	 * Make sock->ops->connect() function return in specified time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	 * since O_NONBLOCK argument in connect() function does not work here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 * then, we should restore the default value of this attribute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	sock_set_sndtimeo(sock->sk, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 				   0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	sock_set_sndtimeo(sock->sk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	if (result == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	if (result == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) bind_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	con->sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	sock_release(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) socket_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	 * Some errors are fatal and this list might need adjusting. For other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	 * errors we try again until the max number of retries is reached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	if (result != -EHOSTUNREACH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	    result != -ENETUNREACH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	    result != -ENETDOWN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	    result != -EINVAL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	    result != -EPROTONOSUPPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		log_print("connect %d try %d error %d", con->nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			  con->retries, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		lowcomms_connect_sock(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* Connect a new socket to its peer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void tcp_connect_to_sock(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	struct sockaddr_storage saddr, src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	int addr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	struct socket *sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	unsigned int mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	if (con->nodeid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		log_print("attempt to connect sock 0 foiled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	dlm_comm_mark(con->nodeid, &mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	mutex_lock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	if (con->retries++ > MAX_CONNECT_RETRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	/* Some odd races can cause double-connects, ignore them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (con->sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	/* Create a socket to communicate with */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 				  SOCK_STREAM, IPPROTO_TCP, &sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	if (result < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	sock_set_mark(sock->sk, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	memset(&saddr, 0, sizeof(saddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		log_print("no address for nodeid %d", con->nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	con->rx_action = receive_from_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	con->connect_action = tcp_connect_to_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	con->shutdown_action = dlm_tcp_shutdown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	add_sock(sock, con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	/* Bind to our cluster-known address connecting to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	   routing problems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	make_sockaddr(&src_addr, 0, &addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				 addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		log_print("could not bind for connect: %d", result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		/* This *may* not indicate a critical error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	log_print("connecting to %d", con->nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	/* Turn off Nagle's algorithm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	tcp_sock_set_nodelay(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 				   O_NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	if (result == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	if (result == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	if (con->sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		sock_release(con->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		con->sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	} else if (sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		sock_release(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	 * Some errors are fatal and this list might need adjusting. For other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	 * errors we try again until the max number of retries is reached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (result != -EHOSTUNREACH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	    result != -ENETUNREACH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	    result != -ENETDOWN && 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	    result != -EINVAL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	    result != -EPROTONOSUPPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		log_print("connect %d try %d error %d", con->nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			  con->retries, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		lowcomms_connect_sock(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static struct socket *tcp_create_listen_sock(struct connection *con,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 					     struct sockaddr_storage *saddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	struct socket *sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	int addr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	if (dlm_local_addr[0]->ss_family == AF_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		addr_len = sizeof(struct sockaddr_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		addr_len = sizeof(struct sockaddr_in6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	/* Create a socket to communicate with */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 				  SOCK_STREAM, IPPROTO_TCP, &sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		log_print("Can't create listening comms socket");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		goto create_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	sock_set_mark(sock->sk, dlm_config.ci_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	/* Turn off Nagle's algorithm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	tcp_sock_set_nodelay(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	sock_set_reuseaddr(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	write_lock_bh(&sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	sock->sk->sk_user_data = con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	save_listen_callbacks(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	con->rx_action = accept_from_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	con->connect_action = tcp_connect_to_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	write_unlock_bh(&sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	/* Bind to our port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		sock_release(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		con->sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		goto create_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	sock_set_keepalive(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	result = sock->ops->listen(sock, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		sock_release(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		goto create_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) create_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	return sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /* Get local addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static void init_local(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	struct sockaddr_storage sas, *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	dlm_local_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		if (dlm_our_addr(&sas, i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		dlm_local_addr[dlm_local_count++] = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static void deinit_local(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	for (i = 0; i < dlm_local_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		kfree(dlm_local_addr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* Initialise SCTP socket and bind to all interfaces */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static int sctp_listen_for_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	struct socket *sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	int result = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	struct connection *con = nodeid2con(0, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (!con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	log_print("Using SCTP for communications");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 				  SOCK_STREAM, IPPROTO_SCTP, &sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		log_print("Can't create comms socket, check SCTP is loaded");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	sock_set_mark(sock->sk, dlm_config.ci_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	sctp_sock_set_nodelay(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	write_lock_bh(&sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	/* Init con struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	sock->sk->sk_user_data = con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	save_listen_callbacks(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	con->sock = sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	con->sock->sk->sk_data_ready = lowcomms_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	con->rx_action = accept_from_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	con->connect_action = sctp_connect_to_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	write_unlock_bh(&sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	/* Bind to all addresses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		goto create_delsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	result = sock->ops->listen(sock, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		log_print("Can't set socket listening");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		goto create_delsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) create_delsock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	sock_release(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	con->sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static int tcp_listen_for_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	struct socket *sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	struct connection *con = nodeid2con(0, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	int result = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	if (!con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	/* We don't support multi-homed hosts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	if (dlm_local_addr[1] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		log_print("TCP protocol can't handle multi-homed hosts, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 			  "try SCTP");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	log_print("Using TCP for communications");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	if (sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		add_sock(sock, con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		result = -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) static struct writequeue_entry *new_writequeue_entry(struct connection *con,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 						     gfp_t allocation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	struct writequeue_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	entry->page = alloc_page(allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (!entry->page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	entry->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	entry->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	entry->end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	entry->users = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	entry->con = con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	struct connection *con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	struct writequeue_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	int offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	con = nodeid2con(nodeid, allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	if (!con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	spin_lock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if ((&e->list == &con->writequeue) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	    (PAGE_SIZE - e->end < len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		e = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		offset = e->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		e->end += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		e->users++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	spin_unlock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	got_one:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		*ppc = page_address(e->page) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		return e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	e = new_writequeue_entry(con, allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	if (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		spin_lock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		offset = e->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		e->end += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		e->users++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		list_add_tail(&e->list, &con->writequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		spin_unlock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		goto got_one;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) void dlm_lowcomms_commit_buffer(void *mh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	struct writequeue_entry *e = (struct writequeue_entry *)mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	struct connection *con = e->con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	int users;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	spin_lock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	users = --e->users;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	if (users)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	e->len = e->end - e->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	spin_unlock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	queue_work(send_workqueue, &con->swork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	spin_unlock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /* Send a message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static void send_to_sock(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	struct writequeue_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	int len, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	mutex_lock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	if (con->sock == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		goto out_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	spin_lock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		e = list_entry(con->writequeue.next, struct writequeue_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			       list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		if ((struct list_head *) e == &con->writequeue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		len = e->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		offset = e->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		BUG_ON(len == 0 && e->users == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		spin_unlock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			ret = kernel_sendpage(con->sock, e->page, offset, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 					      msg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			if (ret == -EAGAIN || ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 				if (ret == -EAGAIN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 				    test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 				    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 					/* Notify TCP that we're limited by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 					 * application window size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 					 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 					set_bit(SOCK_NOSPACE, &con->sock->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 					con->sock->sk->sk_write_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 				cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			} else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 				goto send_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		/* Don't starve people filling buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		if (++count >= MAX_SEND_MSG_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 			count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		spin_lock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		writequeue_entry_complete(e, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	spin_unlock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) send_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	close_connection(con, false, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	/* Requeue the send work. When the work daemon runs again, it will try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	   a new connection, then call this function again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	queue_work(send_workqueue, &con->swork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) out_connect:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	queue_work(send_workqueue, &con->swork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) static void clean_one_writequeue(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	struct writequeue_entry *e, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	spin_lock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		list_del(&e->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		free_entry(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	spin_unlock(&con->writequeue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* Called from recovery when it knows that a node has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)    left the cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) int dlm_lowcomms_close(int nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	struct connection *con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	struct dlm_node_addr *na;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	log_print("closing connection to node %d", nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	con = nodeid2con(nodeid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	if (con) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		set_bit(CF_CLOSE, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		close_connection(con, true, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		clean_one_writequeue(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	spin_lock(&dlm_node_addrs_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	na = find_node_addr(nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	if (na) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		list_del(&na->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		while (na->addr_count--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			kfree(na->addr[na->addr_count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		kfree(na);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	spin_unlock(&dlm_node_addrs_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /* Receive workqueue function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static void process_recv_sockets(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	struct connection *con = container_of(work, struct connection, rwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	clear_bit(CF_READ_PENDING, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		err = con->rx_action(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	} while (!err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) /* Send workqueue function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) static void process_send_sockets(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	struct connection *con = container_of(work, struct connection, swork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	clear_bit(CF_WRITE_PENDING, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	if (con->sock == NULL) /* not mutex protected so check it inside too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		con->connect_action(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	if (!list_empty(&con->writequeue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		send_to_sock(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) static void work_stop(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	if (recv_workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		destroy_workqueue(recv_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	if (send_workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		destroy_workqueue(send_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) static int work_start(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	recv_workqueue = alloc_workqueue("dlm_recv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	if (!recv_workqueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		log_print("can't start dlm_recv");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	send_workqueue = alloc_workqueue("dlm_send",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	if (!send_workqueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		log_print("can't start dlm_send");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		destroy_workqueue(recv_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) static void _stop_conn(struct connection *con, bool and_other)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	mutex_lock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	set_bit(CF_CLOSE, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	set_bit(CF_READ_PENDING, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	set_bit(CF_WRITE_PENDING, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	if (con->sock && con->sock->sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		write_lock_bh(&con->sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		con->sock->sk->sk_user_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		write_unlock_bh(&con->sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	if (con->othercon && and_other)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		_stop_conn(con->othercon, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	mutex_unlock(&con->sock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) static void stop_conn(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	_stop_conn(con, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static void shutdown_conn(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	if (con->shutdown_action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		con->shutdown_action(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static void connection_release(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	struct connection *con = container_of(rcu, struct connection, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	kfree(con->rx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	kfree(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static void free_conn(struct connection *con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	close_connection(con, true, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	spin_lock(&connections_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	hlist_del_rcu(&con->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	spin_unlock(&connections_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	if (con->othercon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		clean_one_writequeue(con->othercon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		call_rcu(&con->othercon->rcu, connection_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	clean_one_writequeue(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	call_rcu(&con->rcu, connection_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static void work_flush(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	int ok, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	struct connection *con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		ok = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		foreach_conn(stop_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		if (recv_workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 			flush_workqueue(recv_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		if (send_workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 			flush_workqueue(send_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		idx = srcu_read_lock(&connections_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			hlist_for_each_entry_rcu(con, &connection_hash[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 						 list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 				ok &= test_bit(CF_READ_PENDING, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 				ok &= test_bit(CF_WRITE_PENDING, &con->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 				if (con->othercon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 					ok &= test_bit(CF_READ_PENDING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 						       &con->othercon->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 					ok &= test_bit(CF_WRITE_PENDING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 						       &con->othercon->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		srcu_read_unlock(&connections_srcu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	} while (!ok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) void dlm_lowcomms_stop(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	/* Set all the flags to prevent any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	   socket activity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	dlm_allow_conn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	if (recv_workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		flush_workqueue(recv_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	if (send_workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		flush_workqueue(send_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	foreach_conn(shutdown_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	work_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	foreach_conn(free_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	work_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	deinit_local();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) int dlm_lowcomms_start(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	int error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	struct connection *con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	for (i = 0; i < CONN_HASH_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		INIT_HLIST_HEAD(&connection_hash[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	init_local();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	if (!dlm_local_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		error = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		log_print("no local IP address has been set");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	error = work_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	dlm_allow_conn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	/* Start listening */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	if (dlm_config.ci_protocol == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		error = tcp_listen_for_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		error = sctp_listen_for_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		goto fail_unlisten;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) fail_unlisten:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	dlm_allow_conn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	con = nodeid2con(0,0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	if (con)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		free_conn(con);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) void dlm_lowcomms_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	struct dlm_node_addr *na, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	spin_lock(&dlm_node_addrs_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		list_del(&na->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		while (na->addr_count--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			kfree(na->addr[na->addr_count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		kfree(na);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	spin_unlock(&dlm_node_addrs_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }