Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
   drbd_state.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

 */

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"

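/* Bundled context for the "after state change" work: the old and new
 * device states, the change flags and an optional completion, handed to
 * after_state_ch() via w_after_state_ch(). */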
struct after_state_chg_work {
	struct drbd_work w;
	struct drbd_device *device;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
	struct drbd_state_change *state_change;
};

enum sanitize_state_warnings {
	NO_WARNING,
	ABORTED_ONLINE_VERIFY,
	ABORTED_RESYNC,
	CONNECTION_LOST_NEGOTIATING,
	IMPLICITLY_UPGRADED_DISK,
	IMPLICITLY_UPGRADED_PDSK,
};

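/* Count the devices and connections of @resource.  The caller must hold
 * a lock that keeps the device IDR and the connection list stable. */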
static void count_objects(struct drbd_resource *resource,
			  unsigned int *n_devices,
			  unsigned int *n_connections)
{
	struct drbd_device *device;
	struct drbd_connection *connection;
	int vnr;

	*n_devices = 0;
	*n_connections = 0;

	idr_for_each_entry(&resource->devices, device, vnr)
		(*n_devices)++;
	for_each_connection(connection, resource)
		(*n_connections)++;
}

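/* Allocate a state change snapshot as one kmalloc() block: the
 * drbd_state_change header is immediately followed by the device,
 * connection and peer-device arrays, and the pointer members are set
 * up to point into that trailing storage. */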
static struct drbd_state_change *alloc_state_change(unsigned int n_devices, unsigned int n_connections, gfp_t gfp)
{
	struct drbd_state_change *state_change;
	unsigned int size, n;

	size = sizeof(struct drbd_state_change) +
	       n_devices * sizeof(struct drbd_device_state_change) +
	       n_connections * sizeof(struct drbd_connection_state_change) +
	       n_devices * n_connections * sizeof(struct drbd_peer_device_state_change);
	state_change = kmalloc(size, gfp);
	if (!state_change)
		return NULL;
	state_change->n_devices = n_devices;
	state_change->n_connections = n_connections;
	state_change->devices = (void *)(state_change + 1);
	state_change->connections = (void *)&state_change->devices[n_devices];
	state_change->peer_devices = (void *)&state_change->connections[n_connections];
	state_change->resource->resource = NULL;
	for (n = 0; n < n_devices; n++)
		state_change->devices[n].device = NULL;
	for (n = 0; n < n_connections; n++)
		state_change->connections[n].connection = NULL;
	return state_change;
}

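/* Record the OLD state of the resource and of all of its connections
 * and (peer) devices.  A reference is taken on each remembered object;
 * forget_state_change() drops them again. */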
struct drbd_state_change *remember_old_state(struct drbd_resource *resource, gfp_t gfp)
{
	struct drbd_state_change *state_change;
	struct drbd_device *device;
	unsigned int n_devices;
	struct drbd_connection *connection;
	unsigned int n_connections;
	int vnr;

	struct drbd_device_state_change *device_state_change;
	struct drbd_peer_device_state_change *peer_device_state_change;
	struct drbd_connection_state_change *connection_state_change;

	/* Caller holds req_lock spinlock.
	 * No state, no device IDR, no connection lists can change. */
	count_objects(resource, &n_devices, &n_connections);
	state_change = alloc_state_change(n_devices, n_connections, gfp);
	if (!state_change)
		return NULL;

	kref_get(&resource->kref);
	state_change->resource->resource = resource;
	state_change->resource->role[OLD] =
		conn_highest_role(first_connection(resource));
	state_change->resource->susp[OLD] = resource->susp;
	state_change->resource->susp_nod[OLD] = resource->susp_nod;
	state_change->resource->susp_fen[OLD] = resource->susp_fen;

	connection_state_change = state_change->connections;
	for_each_connection(connection, resource) {
		kref_get(&connection->kref);
		connection_state_change->connection = connection;
		connection_state_change->cstate[OLD] =
			connection->cstate;
		connection_state_change->peer_role[OLD] =
			conn_highest_peer(connection);
		connection_state_change++;
	}

	device_state_change = state_change->devices;
	peer_device_state_change = state_change->peer_devices;
	idr_for_each_entry(&resource->devices, device, vnr) {
		kref_get(&device->kref);
		device_state_change->device = device;
		device_state_change->disk_state[OLD] = device->state.disk;

		/* The peer_devices for each device have to be enumerated in
		   the order of the connections. We may not use for_each_peer_device() here. */
		for_each_connection(connection, resource) {
			struct drbd_peer_device *peer_device;

			peer_device = conn_peer_device(connection, device->vnr);
			peer_device_state_change->peer_device = peer_device;
			peer_device_state_change->disk_state[OLD] =
				device->state.pdsk;
			peer_device_state_change->repl_state[OLD] =
				max_t(enum drbd_conns,
				      C_WF_REPORT_PARAMS, device->state.conn);
			peer_device_state_change->resync_susp_user[OLD] =
				device->state.user_isp;
			peer_device_state_change->resync_susp_peer[OLD] =
				device->state.peer_isp;
			peer_device_state_change->resync_susp_dependency[OLD] =
				device->state.aftr_isp;
			peer_device_state_change++;
		}
		device_state_change++;
	}

	return state_change;
}

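/* Fill in the NEW side of a snapshot taken by remember_old_state() by
 * reading the current state of the objects recorded in it. */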
static void remember_new_state(struct drbd_state_change *state_change)
{
	struct drbd_resource_state_change *resource_state_change;
	struct drbd_resource *resource;
	unsigned int n;

	if (!state_change)
		return;

	resource_state_change = &state_change->resource[0];
	resource = resource_state_change->resource;

	resource_state_change->role[NEW] =
		conn_highest_role(first_connection(resource));
	resource_state_change->susp[NEW] = resource->susp;
	resource_state_change->susp_nod[NEW] = resource->susp_nod;
	resource_state_change->susp_fen[NEW] = resource->susp_fen;

	for (n = 0; n < state_change->n_devices; n++) {
		struct drbd_device_state_change *device_state_change =
			&state_change->devices[n];
		struct drbd_device *device = device_state_change->device;

		device_state_change->disk_state[NEW] = device->state.disk;
	}

	for (n = 0; n < state_change->n_connections; n++) {
		struct drbd_connection_state_change *connection_state_change =
			&state_change->connections[n];
		struct drbd_connection *connection =
			connection_state_change->connection;

		connection_state_change->cstate[NEW] = connection->cstate;
		connection_state_change->peer_role[NEW] =
			conn_highest_peer(connection);
	}

	for (n = 0; n < state_change->n_devices * state_change->n_connections; n++) {
		struct drbd_peer_device_state_change *peer_device_state_change =
			&state_change->peer_devices[n];
		struct drbd_device *device =
			peer_device_state_change->peer_device->device;
		union drbd_dev_state state = device->state;

		peer_device_state_change->disk_state[NEW] = state.pdsk;
		peer_device_state_change->repl_state[NEW] =
			max_t(enum drbd_conns, C_WF_REPORT_PARAMS, state.conn);
		peer_device_state_change->resync_susp_user[NEW] =
			state.user_isp;
		peer_device_state_change->resync_susp_peer[NEW] =
			state.peer_isp;
		peer_device_state_change->resync_susp_dependency[NEW] =
			state.aftr_isp;
	}
}

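/* Copy the OLD side of a snapshot to its NEW side, producing a snapshot
 * that describes "no change". */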
void copy_old_to_new_state_change(struct drbd_state_change *state_change)
{
	struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
	unsigned int n_device, n_connection, n_peer_device, n_peer_devices;

#define OLD_TO_NEW(x) \
	(x[NEW] = x[OLD])

	OLD_TO_NEW(resource_state_change->role);
	OLD_TO_NEW(resource_state_change->susp);
	OLD_TO_NEW(resource_state_change->susp_nod);
	OLD_TO_NEW(resource_state_change->susp_fen);

	for (n_connection = 0; n_connection < state_change->n_connections; n_connection++) {
		struct drbd_connection_state_change *connection_state_change =
				&state_change->connections[n_connection];

		OLD_TO_NEW(connection_state_change->peer_role);
		OLD_TO_NEW(connection_state_change->cstate);
	}

	for (n_device = 0; n_device < state_change->n_devices; n_device++) {
		struct drbd_device_state_change *device_state_change =
			&state_change->devices[n_device];

		OLD_TO_NEW(device_state_change->disk_state);
	}

	n_peer_devices = state_change->n_devices * state_change->n_connections;
	for (n_peer_device = 0; n_peer_device < n_peer_devices; n_peer_device++) {
		struct drbd_peer_device_state_change *p =
			&state_change->peer_devices[n_peer_device];

		OLD_TO_NEW(p->disk_state);
		OLD_TO_NEW(p->repl_state);
		OLD_TO_NEW(p->resync_susp_user);
		OLD_TO_NEW(p->resync_susp_peer);
		OLD_TO_NEW(p->resync_susp_dependency);
	}

#undef OLD_TO_NEW
}

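/* Release a snapshot: drop the object references taken by
 * remember_old_state() and free the allocation. */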
void forget_state_change(struct drbd_state_change *state_change)
{
	unsigned int n;

	if (!state_change)
		return;

	if (state_change->resource->resource)
		kref_put(&state_change->resource->resource->kref, drbd_destroy_resource);
	for (n = 0; n < state_change->n_devices; n++) {
		struct drbd_device *device = state_change->devices[n].device;

		if (device)
			kref_put(&device->kref, drbd_destroy_device);
	}
	for (n = 0; n < state_change->n_connections; n++) {
		struct drbd_connection *connection =
			state_change->connections[n].connection;

		if (connection)
			kref_put(&connection->kref, drbd_destroy_connection);
	}
	kfree(state_change);
}

static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_device *device, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags,
			   struct drbd_state_change *);
static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
				       union drbd_state ns, enum sanitize_state_warnings *warn);

static inline bool is_susp(union drbd_state s)
{
	return s.susp || s.susp_nod || s.susp_fen;
}

bool conn_all_vols_unconf(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = true;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.disk != D_DISKLESS ||
		    device->state.conn != C_STANDALONE ||
		    device->state.role != R_SECONDARY) {
			rv = false;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

/* Unfortunately the states were not correctly ordered when they were
   defined, therefore we cannot use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_PRIMARY || role2 == R_PRIMARY)
		return R_PRIMARY;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_UNKNOWN;
}

static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
		return R_UNKNOWN;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_PRIMARY;
}

enum drbd_role conn_highest_role(struct drbd_connection *connection)
{
	enum drbd_role role = R_SECONDARY;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		role = max_role(role, device->state.role);
	}
	rcu_read_unlock();

	return role;
}

enum drbd_role conn_highest_peer(struct drbd_connection *connection)
{
	enum drbd_role peer = R_UNKNOWN;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		peer = max_role(peer, device->state.peer);
	}
	rcu_read_unlock();

	return peer;
}

enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_DISKLESS;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = max_t(enum drbd_disk_state, disk_state, device->state.disk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_MASK;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_DISKLESS;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = max_t(enum drbd_disk_state, disk_state, device->state.pdsk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
{
	enum drbd_conns conn = C_MASK;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		conn = min_t(enum drbd_conns, conn, device->state.conn);
	}
	rcu_read_unlock();

	return conn;
}

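/* Return true if no peer device of @connection is still in
 * C_WF_REPORT_PARAMS, i.e. waiting for the peer's state report. */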
static bool no_peer_wf_report_params(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;
	bool rv = true;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		if (peer_device->device->state.conn == C_WF_REPORT_PARAMS) {
			rv = false;
			break;
		}
	rcu_read_unlock();

	return rv;
}

static void wake_up_all_devices(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		wake_up(&peer_device->device->state_wait);
	rcu_read_unlock();
}

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @device:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_device *device,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S) ||
		(os.conn == C_CONNECTED && ns.conn == C_WF_REPORT_PARAMS);
}

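/* Compute a new state from @os by replacing the bits selected by @mask
 * with the corresponding bits of @val. */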
static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
	union drbd_state ns;
	ns.i = (os.i & ~mask.i) | val.i;
	return ns;
}

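/**
 * drbd_change_state() - Apply a masked state change under the req_lock
 * @device:	DRBD device.
 * @f:		flags
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 *
 * Applies @mask/@val to the current state and commits the result with
 * _drbd_set_state() while holding the request lock.
 */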
enum drbd_state_rv
drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	ns = apply_mask_val(drbd_read_state(device), mask, val);
	rv = _drbd_set_state(device, ns, f, NULL);
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_device *device,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(device, CS_HARD, mask, val);
}

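/* Condition for the wait_event() in drbd_req_state() during a cluster
 * wide state change.  Returns SS_CW_SUCCESS or SS_CW_FAILED_BY_PEER once
 * the peer has answered, SS_CW_NO_NEED if no cluster wide change is
 * needed after all, SS_UNKNOWN_ERROR to continue waiting, and any other
 * error code to give up. */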
static enum drbd_state_rv
_req_st_cond(struct drbd_device *device, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &device->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	os = drbd_read_state(device);
	ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv >= SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR;  /* continue waiting, otherwise fail. */

	if (!cl_wide_st_chg(device, os, ns))
		rv = SS_CW_NO_NEED;
	if (rv == SS_UNKNOWN_ERROR) {
		rv = is_valid_state(device, ns);
		if (rv >= SS_SUCCESS) {
			rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
			if (rv >= SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	return rv;
}

/**
 * drbd_req_state() - Perform a possibly cluster-wide state change
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_device *device, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;
	void *buffer = NULL;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(device->state_mutex);
	if (f & CS_INHIBIT_MD_IO)
		buffer = drbd_md_get_buffer(device, __func__);

	spin_lock_irqsave(&device->resource->req_lock, flags);
	os = drbd_read_state(device);
	ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS) {
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
		goto abort;
	}

	if (cl_wide_st_chg(device, os, ns)) {
		rv = is_valid_state(device, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}

		if (drbd_send_state_req(first_peer_device(device), mask, val)) {
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}

		wait_event(device->state_wait,
			(rv = _req_st_cond(device, mask, val)));

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&device->resource->req_lock, flags);
		ns = apply_mask_val(drbd_read_state(device), mask, val);
		rv = _drbd_set_state(device, ns, f, &done);
	} else {
		rv = _drbd_set_state(device, ns, f, &done);
	}

	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (buffer)
		drbd_md_put_buffer(device);
	if (f & CS_SERIALIZE)
		mutex_unlock(device->state_mutex);

	return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_device *device, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(device->state_wait,
		   (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}

/*
 * We grab drbd_md_get_buffer(), because we don't want to "fail" the disk while
 * there is IO in-flight: the transition into D_FAILED for detach purposes
 * may get misinterpreted as actual IO error in a confused endio function.
 *
 * We wrap it all into wait_event(), to retry in case the drbd_req_state()
 * returns SS_IN_TRANSIENT_STATE.
 *
 * To avoid potential deadlock with e.g. the receiver thread trying to grab
 * drbd_md_get_buffer() while trying to get out of the "transient state", we
 * need to grab and release the meta data buffer inside of that wait_event loop.
 */
static enum drbd_state_rv
request_detach(struct drbd_device *device)
{
	return drbd_req_state(device, NS(disk, D_FAILED),
			CS_VERBOSE | CS_ORDERED | CS_INHIBIT_MD_IO);
}

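/* Interruptible detach: suspend IO, request the transition to D_FAILED,
 * then wait until the disk has left D_FAILED.  Returns ERR_INTR if the
 * wait was interrupted by a signal. */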
int drbd_request_detach_interruptible(struct drbd_device *device)
{
	int ret, rv;

	drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
	wait_event_interruptible(device->state_wait,
		(rv = request_detach(device)) != SS_IN_TRANSIENT_STATE);
	drbd_resume_io(device);

	ret = wait_event_interruptible(device->misc_wait,
			device->state.disk != D_FAILED);

	if (rv == SS_IS_DISKLESS)
		rv = SS_NOTHING_TO_DO;
	if (ret)
		rv = ERR_INTR;

	return rv;
}

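/* Like _drbd_request_state(), but for callers that already hold
 * device->state_mutex: the mutex is dropped while waiting for the state
 * change and re-acquired before retrying.  CS_SERIALIZE must not be set. */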
enum drbd_state_rv
_drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	BUG_ON(f & CS_SERIALIZE);

	wait_event_cmd(device->state_wait,
		       (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE,
		       mutex_unlock(device->state_mutex),
		       mutex_lock(device->state_mutex));

	return rv;
}

static void print_st(struct drbd_device *device, const char *name, union drbd_state ns)
{
	drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-',
	    ns.susp_fen ? 'F' : '-',
	    ns.susp_nod ? 'N' : '-'
	    );
}

void print_st_err(struct drbd_device *device, union drbd_state os,
	          union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	drbd_err(device, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(device, " state", os);
	print_st(device, "wanted", ns);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 			       enum chg_state_flags flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	char *pbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	pbp = pb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	*pbp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	if (ns.role != os.role && flags & CS_DC_ROLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		pbp += sprintf(pbp, "role( %s -> %s ) ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			       drbd_role_str(os.role),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			       drbd_role_str(ns.role));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	if (ns.peer != os.peer && flags & CS_DC_PEER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		pbp += sprintf(pbp, "peer( %s -> %s ) ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			       drbd_role_str(os.peer),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 			       drbd_role_str(ns.peer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	if (ns.conn != os.conn && flags & CS_DC_CONN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		pbp += sprintf(pbp, "conn( %s -> %s ) ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			       drbd_conn_str(os.conn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 			       drbd_conn_str(ns.conn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	if (ns.disk != os.disk && flags & CS_DC_DISK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		pbp += sprintf(pbp, "disk( %s -> %s ) ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			       drbd_disk_str(os.disk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			       drbd_disk_str(ns.disk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			       drbd_disk_str(os.pdsk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			       drbd_disk_str(ns.pdsk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	return pbp - pb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) }
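
/*
 * Example (illustrative): promoting a node while its peer disk gets
 * outdated could render the buffer as
 *   "role( Secondary -> Primary ) pdsk( UpToDate -> Outdated ) "
 * provided the corresponding CS_DC_* bits are set in @flags.
 */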
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os, union drbd_state ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 				 enum chg_state_flags flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	char pb[300];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	char *pbp = pb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (ns.aftr_isp != os.aftr_isp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			       os.aftr_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			       ns.aftr_isp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	if (ns.peer_isp != os.peer_isp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			       os.peer_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			       ns.peer_isp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	if (ns.user_isp != os.user_isp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			       os.user_isp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			       ns.user_isp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (pbp != pb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		drbd_info(device, "%s\n", pb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 				 enum chg_state_flags flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	char pb[300];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	char *pbp = pb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	pbp += print_state_change(pbp, os, ns, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		pbp += sprintf(pbp, "susp( %d -> %d ) ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			       is_susp(os),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			       is_susp(ns));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if (pbp != pb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		drbd_info(connection, "%s\n", pb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  * is_valid_state() - Returns an SS_ error code if ns is not valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819)  * @device:	DRBD device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820)  * @ns:		State to consider.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) static enum drbd_state_rv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) is_valid_state(struct drbd_device *device, union drbd_state ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	/* See drbd_state_sw_errors in drbd_strings.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	enum drbd_fencing_p fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	enum drbd_state_rv rv = SS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	struct net_conf *nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	fp = FP_DONT_CARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	if (get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		fp = rcu_dereference(device->ldev->disk_conf)->fencing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	if (nc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		if (!nc->two_primaries && ns.role == R_PRIMARY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 			if (ns.peer == R_PRIMARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 				rv = SS_TWO_PRIMARIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 			else if (conn_highest_peer(first_peer_device(device)->connection) == R_PRIMARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 				rv = SS_O_VOL_PEER_PRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (rv <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		goto out; /* already found a reason to abort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	else if (ns.role == R_SECONDARY && device->open_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		rv = SS_DEVICE_IN_USE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		rv = SS_NO_UP_TO_DATE_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	else if (fp >= FP_RESOURCE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		rv = SS_PRIMARY_NOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		rv = SS_NO_UP_TO_DATE_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		rv = SS_NO_LOCAL_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		rv = SS_NO_REMOTE_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		rv = SS_NO_UP_TO_DATE_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	else if ((ns.conn == C_CONNECTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		  ns.conn == C_WF_BITMAP_S ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		  ns.conn == C_SYNC_SOURCE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		  ns.conn == C_PAUSED_SYNC_S) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		  ns.disk == D_OUTDATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		rv = SS_CONNECTED_OUTDATES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		 (nc->verify_alg[0] == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		rv = SS_NO_VERIFY_ALG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		  first_peer_device(device)->connection->agreed_pro_version < 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		rv = SS_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		rv = SS_NO_UP_TO_DATE_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	else if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		 ns.pdsk == D_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		rv = SS_NEED_CONNECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		rv = SS_CONNECTED_OUTDATES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) }
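
/*
 * Example (illustrative): a request to become R_PRIMARY while
 * disconnected (ns.conn < C_CONNECTED) with a merely D_CONSISTENT local
 * disk is refused above with SS_NO_UP_TO_DATE_DISK.
 */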
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  * This function limits state transitions that may be declined by DRBD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  * i.e. user requests (aka soft transitions).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  * @os:		old state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  * @ns:		new state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  * @connection:	DRBD connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) static enum drbd_state_rv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	enum drbd_state_rv rv = SS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	    os.conn > C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		rv = SS_RESYNC_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		rv = SS_ALREADY_STANDALONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		rv = SS_IS_DISKLESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		rv = SS_NO_NET_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		rv = SS_LOWER_THAN_OUTDATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		rv = SS_IN_TRANSIENT_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	/* While establishing a connection, only allow cstate to change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	   Delay/refuse role changes, detach/attach etc. (they do not touch cstate) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	if (test_bit(STATE_SENT, &connection->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	    !((ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	      (ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		rv = SS_IN_TRANSIENT_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	/* Do not promote during resync handshake triggered by "force primary".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	 * This is a hack. It should really be rejected by the peer during the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 * cluster wide state change request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if (os.role != R_PRIMARY && ns.role == R_PRIMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		&& ns.pdsk == D_UP_TO_DATE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		&& ns.disk != D_UP_TO_DATE && ns.disk != D_DISKLESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		&& (ns.conn <= C_WF_SYNC_UUID || ns.conn != os.conn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 			rv = SS_IN_TRANSIENT_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		rv = SS_NEED_CONNECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	    ns.conn != os.conn && os.conn > C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		rv = SS_RESYNC_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	    os.conn < C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		rv = SS_NEED_CONNECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	    && os.conn < C_WF_REPORT_PARAMS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (ns.conn == C_DISCONNECTING && ns.pdsk == D_OUTDATED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	    os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		rv = SS_OUTDATE_WO_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) }
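
/*
 * Example (illustrative): a user disconnecting a resource that is
 * already standalone (e.g. via "drbdadm disconnect") arrives here with
 * ns.conn == C_DISCONNECTING and os.conn == C_STANDALONE, and the
 * request is declined with SS_ALREADY_STANDALONE.
 */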
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) static enum drbd_state_rv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	/* no change -> nothing to do, at least for the connection part */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	if (oc == nc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		return SS_NOTHING_TO_DO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	/* disconnect of an unconfigured connection does not make sense */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	if (oc == C_STANDALONE && nc == C_DISCONNECTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		return SS_ALREADY_STANDALONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	/* from C_STANDALONE, we start with C_UNCONNECTED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	if (oc == C_STANDALONE && nc != C_UNCONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		return SS_NEED_CONNECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	/* When establishing a connection we need to go through C_WF_REPORT_PARAMS!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	   This is necessary to do the right thing upon invalidate-remote on a disconnected resource. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		return SS_NEED_CONNECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	/* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		return SS_IN_TRANSIENT_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	/* After C_DISCONNECTING only C_STANDALONE may follow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	if (oc == C_DISCONNECTING && nc != C_STANDALONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		return SS_IN_TRANSIENT_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	return SS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
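
/*
 * For orientation (sketch): a normal connect walks
 *   C_STANDALONE -> C_UNCONNECTED -> C_WF_CONNECTION ->
 *   C_WF_REPORT_PARAMS -> C_CONNECTED
 * which satisfies every rule above; trying to jump from below
 * C_WF_REPORT_PARAMS straight to C_CONNECTED or beyond is answered
 * with SS_NEED_CONNECTION.
 */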
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  * This limits hard state transitions. Hard state transitions are facts that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * imposed on DRBD by the environment, e.g. the disk broke or the network broke
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  * down. But even those hard state transitions are not allowed to do everything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  * @ns:		new state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  * @os:		old state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static enum drbd_state_rv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) is_valid_transition(union drbd_state os, union drbd_state ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	enum drbd_state_rv rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	rv = is_valid_conn_transition(os.conn, ns.conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	/* we cannot fail (again) if we already detached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		rv = SS_IS_DISKLESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_state_warnings warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	static const char *msg_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		[NO_WARNING] = "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		[ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		[ABORTED_RESYNC] = "Resync aborted.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		[CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		[IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		[IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	if (warn != NO_WARNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		drbd_warn(device, "%s\n", msg_table[warn]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)  * @device:	DRBD device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)  * @os:		old state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)  * @ns:		new state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)  * @warn:	where to store an optional sanitize_state_warnings value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)  * When we lose connection, we have to set the state of the peer's disk (pdsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)  * to D_UNKNOWN. This rule and many more along those lines are in this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 				       union drbd_state ns, enum sanitize_state_warnings *warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	enum drbd_fencing_p fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		*warn = NO_WARNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	fp = FP_DONT_CARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	if (get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		fp = rcu_dereference(device->ldev->disk_conf)->fencing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	/* Implications from connection to peer and peer_isp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	if (ns.conn < C_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		ns.peer_isp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		ns.peer = R_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			ns.pdsk = D_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	/* Clear the aftr_isp when becoming unconfigured */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		ns.aftr_isp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	/* An implication of the disk states onto the connection state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	/* Abort resync if a disk fails/detaches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		if (warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			*warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 				ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		ns.conn = C_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	/* Connection breaks down before we finished "Negotiating" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	    get_ldev_if_state(device, D_NEGOTIATING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			ns.disk = device->new_state_tmp.disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			ns.pdsk = device->new_state_tmp.pdsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			if (warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				*warn = CONNECTION_LOST_NEGOTIATING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			ns.disk = D_DISKLESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			ns.pdsk = D_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			ns.disk = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			ns.pdsk = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	/* Implications of the connection state on the disk states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	disk_min = D_DISKLESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	disk_max = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	pdsk_min = D_INCONSISTENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	pdsk_max = D_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	switch ((enum drbd_conns)ns.conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	case C_WF_BITMAP_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	case C_PAUSED_SYNC_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	case C_STARTING_SYNC_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	case C_WF_SYNC_UUID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	case C_BEHIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		disk_min = D_INCONSISTENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		disk_max = D_OUTDATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		pdsk_min = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		pdsk_max = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	case C_VERIFY_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	case C_VERIFY_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		disk_min = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		disk_max = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		pdsk_min = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		pdsk_max = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	case C_CONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		disk_min = D_DISKLESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		disk_max = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		pdsk_min = D_DISKLESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		pdsk_max = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	case C_WF_BITMAP_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	case C_PAUSED_SYNC_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	case C_STARTING_SYNC_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	case C_AHEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		disk_min = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		disk_max = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		pdsk_min = D_INCONSISTENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice, but an explicit outdate is necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	case C_SYNC_TARGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		disk_min = D_INCONSISTENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		disk_max = D_INCONSISTENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		pdsk_min = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		pdsk_max = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	case C_SYNC_SOURCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		disk_min = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		disk_max = D_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		pdsk_min = D_INCONSISTENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		pdsk_max = D_INCONSISTENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	case C_STANDALONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	case C_DISCONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	case C_UNCONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	case C_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	case C_BROKEN_PIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	case C_NETWORK_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	case C_PROTOCOL_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	case C_TEAR_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	case C_WF_CONNECTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	case C_WF_REPORT_PARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	case C_MASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (ns.disk > disk_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		ns.disk = disk_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (ns.disk < disk_min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		if (warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 			*warn = IMPLICITLY_UPGRADED_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		ns.disk = disk_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	if (ns.pdsk > pdsk_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		ns.pdsk = pdsk_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	if (ns.pdsk < pdsk_min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		if (warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			*warn = IMPLICITLY_UPGRADED_PDSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		ns.pdsk = pdsk_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	if (fp == FP_STONITH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		ns.susp_nod = 1; /* Suspend IO while no up-to-date data is accessible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		if (ns.conn == C_SYNC_SOURCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			ns.conn = C_PAUSED_SYNC_S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		if (ns.conn == C_SYNC_TARGET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			ns.conn = C_PAUSED_SYNC_T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		if (ns.conn == C_PAUSED_SYNC_S)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 			ns.conn = C_SYNC_SOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		if (ns.conn == C_PAUSED_SYNC_T)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			ns.conn = C_SYNC_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	return ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
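
/*
 * Example (illustrative): if the user pauses a running resync so that
 * user_isp becomes set while ns.conn == C_SYNC_SOURCE, sanitize_state()
 * turns the connection state into C_PAUSED_SYNC_S; once the last *_isp
 * flag is cleared again, it flips back to C_SYNC_SOURCE.
 */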
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) void drbd_resume_al(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (test_and_clear_bit(AL_SUSPENDED, &device->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		drbd_info(device, "Resumed AL updates\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /* helper for _drbd_set_state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	if (first_peer_device(device)->connection->agreed_pro_version < 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		device->ov_start_sector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	device->rs_total = drbd_bm_bits(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	device->ov_position = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	if (cs == C_VERIFY_T) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		/* starting online verify from an arbitrary position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		 * does not fit well into the existing protocol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		 * on C_VERIFY_T, we initialize ov_left and friends
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		 * implicitly in receive_DataRequest once the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		 * first P_OV_REQUEST is received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		device->ov_start_sector = ~(sector_t)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		unsigned long bit = BM_SECT_TO_BIT(device->ov_start_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		if (bit >= device->rs_total) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 			device->ov_start_sector =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 				BM_BIT_TO_SECT(device->rs_total - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 			device->rs_total = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			device->rs_total -= bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		device->ov_position = device->ov_start_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	device->ov_left = device->rs_total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
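
/*
 * Numeric sketch (illustrative): with drbd_bm_bits() == 1000 and an
 * ov_start_sector that maps to bit 400, rs_total is reduced to 600, so
 * progress accounting only covers the range that is actually verified.
 */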
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  * _drbd_set_state() - Set a new DRBD state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  * @device:	DRBD device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  * @ns:		new state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  * @flags:	State change flags (enum chg_state_flags).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)  * @done:	Optional completion that will be completed after after_state_ch() has finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)  * Caller needs to hold req_lock. Do not call directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) enum drbd_state_rv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) _drbd_set_state(struct drbd_device *device, union drbd_state ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	        enum chg_state_flags flags, struct completion *done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	struct drbd_peer_device *peer_device = first_peer_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	union drbd_state os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	enum drbd_state_rv rv = SS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	enum sanitize_state_warnings ssw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	struct after_state_chg_work *ascw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	struct drbd_state_change *state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	os = drbd_read_state(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	ns = sanitize_state(device, os, ns, &ssw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	if (ns.i == os.i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		return SS_NOTHING_TO_DO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	rv = is_valid_transition(os, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	if (rv < SS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	if (!(flags & CS_HARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		/* pre-state-change checks; only look at ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		/* See drbd_state_sw_errors in drbd_strings.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		rv = is_valid_state(device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		if (rv < SS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			/* If the old state was illegal as well, then let
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 			   this happen... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 			if (is_valid_state(device, os) == rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 				rv = is_valid_soft_transition(os, ns, connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			rv = is_valid_soft_transition(os, ns, connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	if (rv < SS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		if (flags & CS_VERBOSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 			print_st_err(device, os, ns, rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	print_sanitize_warnings(device, ssw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	drbd_pr_state_change(device, os, ns, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	/* Display changes to the susp* flags that were caused by the call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	   sanitize_state(). Only display it here if we were not called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	   _conn_request_state(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	if (!(flags & CS_DC_SUSP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		conn_pr_state_change(connection, os, ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 				     (flags & ~CS_DC_MASK) | CS_DC_SUSP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	 * on the ldev here, to be sure the transition -> D_DISKLESS and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	 * drbd_ldev_destroy() won't happen before our corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	 * after_state_ch work has run, where we put_ldev again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		atomic_inc(&device->local_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	if (!is_sync_state(os.conn) && is_sync_state(ns.conn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		clear_bit(RS_DONE, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	/* FIXME: Have any flags been set earlier in this function already? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	state_change = remember_old_state(device->resource, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	/* changes to local_cnt and device flags should be visible before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	 * changes to state, which again should be visible before anything else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	 * depending on that change happens. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	device->state.i = ns.i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	device->resource->susp = ns.susp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	device->resource->susp_nod = ns.susp_nod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	device->resource->susp_fen = ns.susp_fen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	remember_new_state(state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	/* put replicated vs not-replicated requests in separate epochs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	if (drbd_should_do_remote((union drbd_dev_state)os.i) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	    drbd_should_do_remote((union drbd_dev_state)ns.i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		start_new_tl_epoch(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		drbd_print_uuids(device, "attached to UUIDs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	/* Wake up role changes that were delayed while the connection was being established */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	    no_peer_wf_report_params(connection)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		clear_bit(STATE_SENT, &connection->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		wake_up_all_devices(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	wake_up(&device->misc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	wake_up(&device->state_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	wake_up(&connection->ping_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	/* Aborted verify run, or we reached the stop sector.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 * Log the last position, unless end-of-device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	    ns.conn <= C_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		device->ov_start_sector =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 			BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		if (device->ov_left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			drbd_info(device, "Online Verify reached sector %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 				(unsigned long long)device->ov_start_sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		drbd_info(device, "Syncer continues.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		device->rs_paused += (long)jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 				  -(long)device->rs_mark_time[device->rs_last_mark];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		if (ns.conn == C_SYNC_TARGET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			mod_timer(&device->resync_timer, jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		drbd_info(device, "Resync suspended\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		device->rs_mark_time[device->rs_last_mark] = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	if (os.conn == C_CONNECTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		unsigned long now = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		set_ov_position(device, ns.conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		device->rs_start = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		device->rs_last_sect_ev = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		device->ov_last_oos_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		device->ov_last_oos_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 			device->rs_mark_left[i] = device->ov_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			device->rs_mark_time[i] = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		drbd_rs_controller_reset(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		if (ns.conn == C_VERIFY_S) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 			drbd_info(device, "Starting Online Verify from sector %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 					(unsigned long long)device->ov_position);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 			mod_timer(&device->resync_timer, jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
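	/* Re-derive the persistent meta-data flags (MDF_*) from the state
	 * committed above; the meta data is marked dirty only if the flag
	 * word actually changed. */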
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	if (get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		mdf &= ~MDF_AL_CLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		if (test_bit(CRASHED_PRIMARY, &device->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 			mdf |= MDF_CRASHED_PRIMARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		if (device->state.role == R_PRIMARY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		    (device->state.pdsk < D_INCONSISTENT && device->state.peer == R_PRIMARY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 			mdf |= MDF_PRIMARY_IND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		if (device->state.conn > C_WF_REPORT_PARAMS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			mdf |= MDF_CONNECTED_IND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		if (device->state.disk > D_INCONSISTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 			mdf |= MDF_CONSISTENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		if (device->state.disk > D_OUTDATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 			mdf |= MDF_WAS_UP_TO_DATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		if (device->state.pdsk <= D_OUTDATED && device->state.pdsk >= D_INCONSISTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 			mdf |= MDF_PEER_OUT_DATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		if (mdf != device->ldev->md.flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			device->ldev->md.flags = mdf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 			drbd_md_mark_dirty(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resyncing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		set_bit(CONSIDER_RESYNC, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	/* The receiver should clean up after itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		drbd_thread_stop_nowait(&connection->receiver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	/* Now that the receiver has finished cleaning up after itself, it should die */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		drbd_thread_stop_nowait(&connection->receiver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	/* Upon network failure, we need to restart the receiver. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	if (os.conn > C_WF_CONNECTION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		drbd_thread_restart_nowait(&connection->receiver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	/* Resume AL writing if we get a connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		drbd_resume_al(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		connection->connect_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	/* remember last attach time so request_timer_fn() won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	 * kill newly established sessions while we are still trying to thaw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	 * previously frozen IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	    ns.disk > D_NEGOTIATING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		device->last_reattach_jif = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	if (ascw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		ascw->os = os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		ascw->ns = ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		ascw->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		ascw->w.cb = w_after_state_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		ascw->device = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		ascw->done = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		ascw->state_change = state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		drbd_queue_work(&connection->sender_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 				&ascw->w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		drbd_err(device, "Could not kmalloc an ascw\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
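/* Worker callback: perform the deferred after-state-change actions for
 * the recorded old/new state pair, signal a possible waiter, and free
 * the work item. */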
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static int w_after_state_ch(struct drbd_work *w, int unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	struct after_state_chg_work *ascw =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		container_of(w, struct after_state_chg_work, w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	struct drbd_device *device = ascw->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	after_state_ch(device, ascw->os, ascw->ns, ascw->flags, ascw->state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	forget_state_change(ascw->state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (ascw->flags & CS_WAIT_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		complete(ascw->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	kfree(ascw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
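/* Completion callback for the bitmap write-out queued when entering
 * C_STARTING_SYNC_S/T: on success, continue the resync handshake; on
 * failure, fall back to C_CONNECTED without starting a resync. */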
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static void abw_start_sync(struct drbd_device *device, int rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		drbd_err(device, "Writing the bitmap failed, not starting resync.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		_drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	switch (device->state.conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	case C_STARTING_SYNC_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	case C_STARTING_SYNC_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		drbd_start_resync(device, C_SYNC_SOURCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
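/* Bitmap IO variant for use from the worker thread only: suspend
 * application IO without blocking, run io_fn() under the bitmap lock,
 * then resume IO. */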
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) int drbd_bitmap_io_from_worker(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		int (*io_fn)(struct drbd_device *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		char *why, enum bm_flag flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	/* open-coded non-blocking drbd_suspend_io(device); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	atomic_inc(&device->suspend_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	drbd_bm_lock(device, why, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	rv = io_fn(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	drbd_bm_unlock(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	drbd_resume_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
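/* The notify_*_state_change() helpers below translate a recorded state
 * change into the *_info structure expected by the corresponding
 * netlink notification function. */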
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) void notify_resource_state_change(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 				  unsigned int seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 				  struct drbd_resource_state_change *resource_state_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 				  enum drbd_notification_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	struct drbd_resource *resource = resource_state_change->resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	struct resource_info resource_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		.res_role = resource_state_change->role[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		.res_susp = resource_state_change->susp[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		.res_susp_nod = resource_state_change->susp_nod[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		.res_susp_fen = resource_state_change->susp_fen[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	notify_resource_state(skb, seq, resource, &resource_info, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) void notify_connection_state_change(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 				    unsigned int seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 				    struct drbd_connection_state_change *connection_state_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 				    enum drbd_notification_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	struct drbd_connection *connection = connection_state_change->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	struct connection_info connection_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		.conn_connection_state = connection_state_change->cstate[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		.conn_role = connection_state_change->peer_role[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	notify_connection_state(skb, seq, connection, &connection_info, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) void notify_device_state_change(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 				unsigned int seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 				struct drbd_device_state_change *device_state_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 				enum drbd_notification_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	struct drbd_device *device = device_state_change->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	struct device_info device_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		.dev_disk_state = device_state_change->disk_state[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	notify_device_state(skb, seq, device, &device_info, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) void notify_peer_device_state_change(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 				     unsigned int seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 				     struct drbd_peer_device_state_change *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 				     enum drbd_notification_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	struct drbd_peer_device *peer_device = p->peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	struct peer_device_info peer_device_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		.peer_repl_state = p->repl_state[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		.peer_disk_state = p->disk_state[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		.peer_resync_susp_user = p->resync_susp_user[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		.peer_resync_susp_peer = p->resync_susp_peer[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
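/* Emit one netlink notification per object whose state actually
 * changed.  All but the last notification are sent with
 * NOTIFY_CONTINUES set, so that userspace can tell they belong to the
 * same atomic state transition. */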
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) static void broadcast_state_change(struct drbd_state_change *state_change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	bool resource_state_has_changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	void (*last_func)(struct sk_buff *, unsigned int, void *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 			  enum drbd_notification_type) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	void *last_arg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) #define HAS_CHANGED(state) ((state)[OLD] != (state)[NEW])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) #define FINAL_STATE_CHANGE(type) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	({ if (last_func) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		last_func(NULL, 0, last_arg, type); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) #define REMEMBER_STATE_CHANGE(func, arg, type) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	({ FINAL_STATE_CHANGE(type | NOTIFY_CONTINUES); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	   last_func = (typeof(last_func))func; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	   last_arg = arg; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	 })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	mutex_lock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	resource_state_has_changed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	    HAS_CHANGED(resource_state_change->role) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	    HAS_CHANGED(resource_state_change->susp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	    HAS_CHANGED(resource_state_change->susp_nod) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	    HAS_CHANGED(resource_state_change->susp_fen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	if (resource_state_has_changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		REMEMBER_STATE_CHANGE(notify_resource_state_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 				      resource_state_change, NOTIFY_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	for (n_connection = 0; n_connection < state_change->n_connections; n_connection++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		struct drbd_connection_state_change *connection_state_change =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 				&state_change->connections[n_connection];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		if (HAS_CHANGED(connection_state_change->peer_role) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		    HAS_CHANGED(connection_state_change->cstate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 			REMEMBER_STATE_CHANGE(notify_connection_state_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 					      connection_state_change, NOTIFY_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	for (n_device = 0; n_device < state_change->n_devices; n_device++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		struct drbd_device_state_change *device_state_change =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 			&state_change->devices[n_device];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		if (HAS_CHANGED(device_state_change->disk_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			REMEMBER_STATE_CHANGE(notify_device_state_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 					      device_state_change, NOTIFY_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	n_peer_devices = state_change->n_devices * state_change->n_connections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	for (n_peer_device = 0; n_peer_device < n_peer_devices; n_peer_device++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		struct drbd_peer_device_state_change *p =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			&state_change->peer_devices[n_peer_device];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		if (HAS_CHANGED(p->disk_state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		    HAS_CHANGED(p->repl_state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		    HAS_CHANGED(p->resync_susp_user) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		    HAS_CHANGED(p->resync_susp_peer) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		    HAS_CHANGED(p->resync_susp_dependency))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 			REMEMBER_STATE_CHANGE(notify_peer_device_state_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 					      p, NOTIFY_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	FINAL_STATE_CHANGE(NOTIFY_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	mutex_unlock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) #undef HAS_CHANGED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) #undef FINAL_STATE_CHANGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) #undef REMEMBER_STATE_CHANGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) /* takes old and new peer disk state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) static bool lost_contact_to_peer_data(enum drbd_disk_state os, enum drbd_disk_state ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	if ((os >= D_INCONSISTENT && os != D_UNKNOWN && os != D_OUTDATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	&&  (ns < D_INCONSISTENT || ns == D_UNKNOWN || ns == D_OUTDATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	/* Scenario, starting with normal operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	 * Connected Primary/Secondary UpToDate/UpToDate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	 * NetworkFailure Primary/Unknown UpToDate/DUnknown (frozen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	 * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	 * Connected Primary/Secondary UpToDate/Diskless (resumed; needs to bump uuid!)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	if (os == D_UNKNOWN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	&&  (ns == D_DISKLESS || ns == D_FAILED || ns == D_OUTDATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)  * after_state_ch() - Perform after state change actions that may sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)  * @device:	DRBD device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)  * @os:		old state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)  * @ns:		new state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)  * @flags:	state change flags.
 * @state_change: state change to broadcast to userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static void after_state_ch(struct drbd_device *device, union drbd_state os,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 			   union drbd_state ns, enum chg_state_flags flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 			   struct drbd_state_change *state_change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	struct drbd_resource *resource = device->resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	struct drbd_peer_device *peer_device = first_peer_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	struct sib_info sib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	broadcast_state_change(state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	sib.sib_reason = SIB_STATE_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	sib.os = os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	sib.ns = ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	if ((os.disk != D_UP_TO_DATE || os.pdsk != D_UP_TO_DATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	&&  (ns.disk == D_UP_TO_DATE && ns.pdsk == D_UP_TO_DATE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		clear_bit(CRASHED_PRIMARY, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		if (device->p_uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 			device->p_uuid[UI_FLAGS] &= ~((u64)2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	/* Inform userspace about the change... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	drbd_bcast_event(device, &sib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		drbd_khelper(device, "pri-on-incon-degr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	/* Here we have the actions that are performed after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	   state change. This function might sleep. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	if (ns.susp_nod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		enum drbd_req_event what = NOTHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		spin_lock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			what = RESEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		    conn_lowest_disk(connection) == D_UP_TO_DATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 			what = RESTART_FROZEN_DISK_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		if (resource->susp_nod && what != NOTHING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 			_tl_restart(connection, what);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 			_conn_request_state(connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 					    (union drbd_state) { { .susp_nod = 1 } },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 					    (union drbd_state) { { .susp_nod = 0 } },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 					    CS_VERBOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		spin_unlock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	if (ns.susp_fen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		spin_lock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		/* case 2: the connection was established again: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			int vnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 			rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 			idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 				clear_bit(NEW_CUR_UUID, &peer_device->device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 			/* We should actively create a new uuid, _before_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 			 * we resume/resend, if the peer is diskless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 			 * (recovery from a multiple error scenario).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 			 * Currently, this happens with a slight delay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 			 * below when checking lost_contact_to_peer_data() ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			_tl_restart(connection, RESEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			_conn_request_state(connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 					    (union drbd_state) { { .susp_fen = 1 } },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 					    (union drbd_state) { { .susp_fen = 0 } },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 					    CS_VERBOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		spin_unlock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	/* Became sync source.  With protocol >= 96, we still need to send out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	 * the sync uuid now. Need to do that before any drbd_send_state, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	 * the other side may go "paused sync" before receiving the sync uuids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	 * which is unexpected. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	    connection->agreed_pro_version >= 96 && get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		drbd_gen_and_send_sync_uuid(peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	/* Do not change the order of the if above and the two below... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	if (os.pdsk == D_DISKLESS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	    ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) {      /* attach on the peer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		/* We will probably start a resync soon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		 * Make sure those things are properly reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		device->rs_total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		device->rs_failed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		atomic_set(&device->rs_pending_cnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		drbd_rs_cancel_all(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		drbd_send_uuids(peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		drbd_send_state(peer_device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	/* No point in queuing send_bitmap if we don't have a connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	 * anymore, so check also the _current_ state, not only the new state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	 * at the time this work was queued. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	    device->state.conn == C_WF_BITMAP_S)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		drbd_queue_bitmap_io(device, &drbd_send_bitmap, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 				"send_bitmap (WFBitMapS)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 				BM_LOCKED_TEST_ALLOWED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	/* Lost contact to peer's copy of the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	if (lost_contact_to_peer_data(os.pdsk, ns.pdsk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		if (get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 			    device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 				if (drbd_suspended(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 					set_bit(NEW_CUR_UUID, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 					drbd_uuid_new_current(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 					drbd_send_uuids(peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 			put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	if (ns.pdsk < D_INCONSISTENT && get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		if (os.peer != R_PRIMARY && ns.peer == R_PRIMARY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		    device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 			drbd_uuid_new_current(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 			drbd_send_uuids(peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		/* D_DISKLESS Peer becomes secondary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 			/* We may still be Primary ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 			 * No harm done if the bitmap still changes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 			 * redirtied pages will follow later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 			drbd_bitmap_io_from_worker(device, &drbd_bm_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 				"demote diskless peer", BM_LOCKED_SET_ALLOWED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	/* Write out all changed bits on demote.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	 * Though, there is no need to do that just yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	 * if a resync is still going on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		device->state.conn <= C_CONNECTED && get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		/* No changes to the bitmap are expected this time, so assert that,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		 * even though no harm is done if it does change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		drbd_bitmap_io_from_worker(device, &drbd_bm_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 				"demote", BM_LOCKED_TEST_ALLOWED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	/* Last part of the attaching process ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	if (ns.conn >= C_CONNECTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		drbd_send_sizes(peer_device, 0, 0);  /* to start sync... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		drbd_send_uuids(peer_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		drbd_send_state(peer_device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	/* We want to pause/continue resync, tell peer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	if (ns.conn >= C_CONNECTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	     ((os.aftr_isp != ns.aftr_isp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	      (os.user_isp != ns.user_isp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		drbd_send_state(peer_device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	/* In case one of the isp bits got set, suspend other devices. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		suspend_other_sg(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	/* Make sure the peer gets informed about possible state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	   changes (ISP bits) while we were in WFReportParams. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		drbd_send_state(peer_device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		drbd_send_state(peer_device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	/* We are in the process of starting a full sync... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		/* no other bitmap changes expected during this phase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		drbd_queue_bitmap_io(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 			&drbd_bmio_set_n_write, &abw_start_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	/* first half of local IO error, failure to attach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	 * or administrative detach */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		enum drbd_io_error_p eh = EP_PASS_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		int was_io_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		/* corresponding get_ldev was in _drbd_set_state, to serialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		 * our cleanup here with the transition to D_DISKLESS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		 * But it is still not safe to dereference ldev here, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		 * we might come from a failed Attach before ldev was set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		if (device->ldev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 			rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 			eh = rcu_dereference(device->ldev->disk_conf)->on_io_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			/* Intentionally call this handler first, before drbd_send_state().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 			 * See: 2932204 drbd: call local-io-error handler early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 			 * People may choose to hard-reset the box from this handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 			 * It is useful if this looks like a "regular node crash". */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 			if (was_io_error && eh == EP_CALL_HELPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 				drbd_khelper(device, "local-io-error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			/* Immediately allow completion of all application IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 			 * that waits for completion from the local disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			 * if this was a force-detach due to disk_timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 			 * or administrator request (drbdsetup detach --force).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			 * Do NOT abort otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			 * Aborting local requests may cause serious problems,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			 * if requests are completed to upper layers already,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 			 * and then later the already submitted local bio completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 			 * This can cause DMA into former bio pages that meanwhile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 			 * have been re-used for other things.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 			 * So aborting local requests may cause crashes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 			 * or even worse, silent data corruption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			if (test_and_clear_bit(FORCE_DETACH, &device->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 				tl_abort_disk_io(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 			/* current state still has to be D_FAILED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 			 * there is only one way out: to D_DISKLESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			 * and that may only happen after our put_ldev below. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 			if (device->state.disk != D_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 				drbd_err(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 					"ASSERT FAILED: disk is %s during detach\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 					drbd_disk_str(device->state.disk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 			if (ns.conn >= C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 				drbd_send_state(peer_device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			drbd_rs_cancel_all(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 			/* In case we want to get something to stable storage still,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			 * this may be the last chance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 			 * The following put_ldev may transition to D_DISKLESS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 			drbd_md_sync(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	/* second half of local IO error, failure to attach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	 * or administrative detach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	 * after local_cnt references have reached zero again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		/* We must still be diskless,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		 * re-attach has to be serialized with this! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		if (device->state.disk != D_DISKLESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 			drbd_err(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 				 "ASSERT FAILED: disk is %s while going diskless\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 				 drbd_disk_str(device->state.disk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		if (ns.conn >= C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 			drbd_send_state(peer_device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		/* corresponding get_ldev in __drbd_set_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		 * this may finally trigger drbd_ldev_destroy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	/* Notify peer that I had a local IO error and did not detach. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		drbd_send_state(peer_device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	/* Disks got bigger while they were detached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	    test_and_clear_bit(RESYNC_AFTER_NEG, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		if (ns.conn == C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 			resync_after_online_grow(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	/* A resync finished or aborted, wake paused devices... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	    (os.peer_isp && !ns.peer_isp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	    (os.user_isp && !ns.user_isp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		resume_next_sg(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	/* sync target done with resync.  Explicitly notify peer, even though
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	 * it should (at least for non-empty resyncs) already know itself. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		drbd_send_state(peer_device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	/* Verify finished, or reached stop sector.  Peer did not know about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	 * the stop sector, and we may even have changed the stop sector during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	 * verify to interrupt/stop early.  Send the new state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	&& verify_can_do_stop_sector(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		drbd_send_state(peer_device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	/* This triggers bitmap writeout of potentially still unwritten pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	 * if the resync finished cleanly, or aborted because of peer disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	 * failure, or on transition from resync back to AHEAD/BEHIND.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	 * Connection loss is handled in drbd_disconnected() by the receiver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	 * For resync aborted because of local disk failure, we cannot do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	 * any bitmap writeout anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	 * No harm done if some bits change during this phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	if ((os.conn > C_CONNECTED && os.conn < C_AHEAD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	    (ns.conn == C_CONNECTED || ns.conn >= C_AHEAD) && get_ldev(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	if (ns.disk == D_DISKLESS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	    ns.conn == C_STANDALONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	    ns.role == R_SECONDARY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		if (os.aftr_isp != ns.aftr_isp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 			resume_next_sg(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	drbd_md_sync(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) struct after_conn_state_chg_work {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	struct drbd_work w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	enum drbd_conns oc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	union drbd_state ns_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	union drbd_state ns_max; /* new max state over all devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	enum chg_state_flags flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	struct drbd_state_change *state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
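/* Worker callback for after-connection-state-change actions: broadcast
 * the change, start the receiver once the network is configured, free
 * the network configuration on disconnect, and resume IO that was
 * frozen by fencing once the peer is known to be outdated. */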
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) static int w_after_conn_state_ch(struct drbd_work *w, int unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	struct after_conn_state_chg_work *acscw =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		container_of(w, struct after_conn_state_chg_work, w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	struct drbd_connection *connection = acscw->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	enum drbd_conns oc = acscw->oc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	union drbd_state ns_max = acscw->ns_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	int vnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	broadcast_state_change(acscw->state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	forget_state_change(acscw->state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	kfree(acscw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	/* Upon network configuration, we need to start the receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		drbd_thread_start(&connection->receiver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		struct net_conf *old_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		mutex_lock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 			notify_peer_device_state(NULL, 0, peer_device, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		notify_connection_state(NULL, 0, connection, NULL, NOTIFY_DESTROY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		mutex_unlock(&notification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		mutex_lock(&connection->resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		old_conf = connection->net_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		connection->my_addr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		connection->peer_addr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		RCU_INIT_POINTER(connection->net_conf, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		conn_free_crypto(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		mutex_unlock(&connection->resource->conf_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		kfree(old_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	if (ns_max.susp_fen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		/* case 1: the outdate-peer handler was successful: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		if (ns_max.pdsk <= D_OUTDATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 			rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 			idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 				struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 				if (test_bit(NEW_CUR_UUID, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 					drbd_uuid_new_current(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 					clear_bit(NEW_CUR_UUID, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 			spin_lock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 			_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 			_conn_request_state(connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 					    (union drbd_state) { { .susp_fen = 1 } },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 					    (union drbd_state) { { .susp_fen = 0 } },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 					    CS_VERBOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 			spin_unlock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	conn_md_sync(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	kref_put(&connection->kref, drbd_destroy_connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 
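/* Determine the old state common to all volumes of a connection, and
 * record via the CS_DC_* flags which state fields are identical on all
 * of them. */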
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	enum chg_state_flags flags = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	int vnr, first_vol = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	union drbd_dev_state os, cs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		{ .role = R_SECONDARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		  .peer = R_UNKNOWN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		  .conn = connection->cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		  .disk = D_DISKLESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		  .pdsk = D_UNKNOWN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		} };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		os = device->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		if (first_vol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 			cs = os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 			first_vol = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		if (cs.role != os.role)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 			flags &= ~CS_DC_ROLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		if (cs.peer != os.peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 			flags &= ~CS_DC_PEER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		if (cs.conn != os.conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 			flags &= ~CS_DC_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		if (cs.disk != os.disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 			flags &= ~CS_DC_DISK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		if (cs.pdsk != os.pdsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 			flags &= ~CS_DC_PDSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	*pf |= CS_DC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	*pf &= flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	(*pcs).i = cs.i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
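/* Check whether applying mask/val yields a valid state transition on
 * every volume of the connection; stop at the first volume that
 * rejects it. */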
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) static enum drbd_state_rv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) conn_is_valid_transition(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 			 enum chg_state_flags flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	enum drbd_state_rv rv = SS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	union drbd_state ns, os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	int vnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		os = drbd_read_state(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 			ns.disk = os.disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		if (ns.i == os.i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		rv = is_valid_transition(os, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		if (rv >= SS_SUCCESS && !(flags & CS_HARD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 			rv = is_valid_state(device, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 			if (rv < SS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 				if (is_valid_state(device, os) == rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 					rv = is_valid_soft_transition(os, ns, connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 				rv = is_valid_soft_transition(os, ns, connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		if (rv < SS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 			if (flags & CS_VERBOSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 				print_st_err(device, os, ns, rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
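/*
 * conn_is_valid_transition() dry-runs the requested change on every
 * volume.  apply_mask_val() (defined earlier in this file) overlays
 * only the masked fields of the request onto the old state,
 *
 *	ns.i = (os.i & ~mask.i) | (val.i & mask.i);
 *
 * and sanitize_state() then resolves impossible combinations before
 * the transition itself is checked.  Volumes whose state would not
 * change (ns.i == os.i) are skipped; the first volume that rejects
 * the transition aborts the whole request.
 */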
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	       union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	union drbd_state ns, os, ns_max = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	union drbd_state ns_min = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		{ .role = R_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		  .peer = R_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		  .conn = val.conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		  .disk = D_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		  .pdsk = D_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		} };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	struct drbd_peer_device *peer_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	enum drbd_state_rv rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	int vnr, number_of_volumes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	if (mask.conn == C_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		/* remember last connect time so request_timer_fn() won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		 * kill newly established sessions while we are still trying to thaw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		 * previously frozen IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 			connection->last_reconnect_jif = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		connection->cstate = val.conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 		struct drbd_device *device = peer_device->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		number_of_volumes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		os = drbd_read_state(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		ns = apply_mask_val(os, mask, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		ns = sanitize_state(device, os, ns, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 			ns.disk = os.disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		rv = _drbd_set_state(device, ns, flags, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		BUG_ON(rv < SS_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		ns.i = device->state.i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		ns_max.role = max_role(ns.role, ns_max.role);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		ns_max.peer = max_role(ns.peer, ns_max.peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		ns_min.role = min_role(ns.role, ns_min.role);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		ns_min.peer = min_role(ns.peer, ns_min.peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	if (number_of_volumes == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		ns_min = ns_max = (union drbd_state) { {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 				.role = R_SECONDARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 				.peer = R_UNKNOWN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 				.conn = val.conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 				.disk = D_DISKLESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 				.pdsk = D_UNKNOWN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 			} };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	ns_min.susp = ns_max.susp = connection->resource->susp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	ns_min.susp_nod = ns_max.susp_nod = connection->resource->susp_nod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	ns_min.susp_fen = ns_max.susp_fen = connection->resource->susp_fen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	*pns_min = ns_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	*pns_max = ns_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) }
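/*
 * The (ns_min, ns_max) pair summarizes the per-volume results as a
 * range: ns_min starts at the field masks (R_MASK, D_MASK - the
 * largest encodable values) and ns_max at zero, so the first volume
 * initializes both and every further volume can only widen the range.
 * Sketch of the fold for one field:
 *
 *	lo = D_MASK;  hi = 0;
 *	for each volume:
 *		lo = min(lo, volume.disk);
 *		hi = max(hi, volume.disk);
 *
 * With no volumes the loop never runs, which is why the
 * number_of_volumes == 0 case substitutes an explicit default state.
 */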
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) static enum drbd_state_rv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) _conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	enum drbd_state_rv err, rv = SS_UNKNOWN_ERROR; /* continue waiting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		rv = SS_CW_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		rv = SS_CW_FAILED_BY_PEER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	err = conn_is_valid_transition(connection, mask, val, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	if (err == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) }
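/*
 * _conn_rq_cond() serves as the condition of wait_event_lock_irq() in
 * _conn_request_state() below.  SS_UNKNOWN_ERROR is 0 in
 * enum drbd_state_rv, so returning it keeps the condition false and
 * the waiter asleep until the peer's reply sets CONN_WD_ST_CHG_OKAY
 * or CONN_WD_ST_CHG_FAIL.  Schematically (not the real macro
 * expansion) the wait amounts to:
 *
 *	for (;;) {
 *		rv = _conn_rq_cond(connection, mask, val);
 *		if (rv)
 *			break;
 *		drop req_lock, sleep on ping_wait, re-take req_lock
 *	}
 */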
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) enum drbd_state_rv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) _conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		    enum chg_state_flags flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	enum drbd_state_rv rv = SS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	struct after_conn_state_chg_work *acscw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	enum drbd_conns oc = connection->cstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	union drbd_state ns_max, ns_min, os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	bool have_mutex = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	struct drbd_state_change *state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	if (mask.conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		rv = is_valid_conn_transition(oc, val.conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		if (rv < SS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 			goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	rv = conn_is_valid_transition(connection, mask, val, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	if (rv < SS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	    !(flags & (CS_LOCAL_ONLY | CS_HARD))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		/* This will be a cluster-wide state change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 		 * Need to give up the spinlock, grab the mutex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		 * then send the state change request, ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		spin_unlock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		mutex_lock(&connection->cstate_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		have_mutex = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		set_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		if (conn_send_state_req(connection, mask, val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 			/* sending failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 			clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 			rv = SS_CW_FAILED_BY_PEER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 			/* need to re-acquire the spin lock, though */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 			goto abort_unlocked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		if (val.conn == C_DISCONNECTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 			set_bit(DISCONNECT_SENT, &connection->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		/* ... and re-acquire the spinlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		 * conn_set_state() within the same spinlock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		spin_lock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		wait_event_lock_irq(connection->ping_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 				(rv = _conn_rq_cond(connection, mask, val)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 				connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		if (rv < SS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 			goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	state_change = remember_old_state(connection->resource, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	conn_old_common_state(connection, &os, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	flags |= CS_DC_SUSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	conn_set_state(connection, mask, val, &ns_min, &ns_max, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	conn_pr_state_change(connection, os, ns_max, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	remember_new_state(state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	if (acscw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		acscw->oc = os.conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		acscw->ns_min = ns_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		acscw->ns_max = ns_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		acscw->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		acscw->w.cb = w_after_conn_state_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		kref_get(&connection->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		acscw->connection = connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		acscw->state_change = state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		drbd_queue_work(&connection->sender_work, &acscw->w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		drbd_err(connection, "Could not kmalloc an acscw\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)  abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	if (have_mutex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		/* mutex_unlock() "... must not be used in interrupt context.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		 * so give up the spinlock, then re-acquire it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		spin_unlock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)  abort_unlocked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		mutex_unlock(&connection->cstate_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		spin_lock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		drbd_err(connection, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		drbd_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
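/*
 * In outline, a cluster-wide change (leaving C_WF_REPORT_PARAMS for
 * C_DISCONNECTING without CS_LOCAL_ONLY or CS_HARD) goes through:
 *
 *	1. drop req_lock, take cstate_mutex
 *	2. set CONN_WD_ST_CHG_REQ and send the request to the peer
 *	3. re-take req_lock, sleep on ping_wait until _conn_rq_cond()
 *	   reports the peer's verdict
 *	4. on success, commit via conn_set_state() under that same
 *	   req_lock and queue w_after_conn_state_ch() as after-work
 *	5. on abort, release cstate_mutex only with req_lock dropped,
 *	   since mutex_unlock() must not run in atomic context, then
 *	   re-take req_lock for the caller
 */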
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) enum drbd_state_rv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		   enum chg_state_flags flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	enum drbd_state_rv rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	spin_lock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	rv = _conn_request_state(connection, mask, val, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	spin_unlock_irq(&connection->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) }
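/*
 * Sketch of a typical caller, assuming the NS() helper from
 * drbd_int.h (it builds a (mask, val) pair naming exactly one state
 * field):
 *
 *	enum drbd_state_rv rv;
 *
 *	rv = conn_request_state(connection,
 *				NS(conn, C_DISCONNECTING),
 *				CS_VERBOSE);
 *	if (rv < SS_SUCCESS)
 *		drbd_err(connection, "disconnect refused\n");
 *
 * conn_request_state() takes req_lock itself; callers already holding
 * it use _conn_request_state() directly.
 */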