Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards
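The listing below appears to be fs/dlm/recover.c, the DLM lockspace recovery code, as carried in this 5.10 tree.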

// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"

/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They need
 * to abort if the lockspace is stopped, indicating a node has failed (perhaps
 * the one being waited for).
 */

/*
 * Wait until the given function returns non-zero or the lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timeout.  This uses a timeout so it can check periodically if the wait
 * should abort due to node failure (which doesn't cause a wake_up).
 * This should only be called by the dlm_recoverd thread.
 */

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
	int error = 0;
	int rv;

	while (1) {
		rv = wait_event_timeout(ls->ls_wait_general,
					testfn(ls) || dlm_recovery_stopped(ls),
					dlm_config.ci_recover_timer * HZ);
		if (rv)
			break;
		if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) {
			log_debug(ls, "dlm_wait_function timed out");
			return -ETIMEDOUT;
		}
	}

	if (dlm_recovery_stopped(ls)) {
		log_debug(ls, "dlm_wait_function aborted");
		error = -EINTR;
	}
	return error;
}

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

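/*
 * ls_recover_status is a bitmask of DLM_RS_* flags, read and updated under
 * ls_recover_lock.  _set_recover_status() is the lock-already-held variant
 * used by dlm_recover_members_wait() below.
 */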
uint32_t dlm_recover_status(struct dlm_ls *ls)
{
	uint32_t status;
	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	spin_unlock(&ls->ls_recover_lock);
	return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	spin_lock(&ls->ls_recover_lock);
	_set_recover_status(ls, status);
	spin_unlock(&ls->ls_recover_lock);
}

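/*
 * Used by the low nodeid: poll every lockspace member with an rcom status
 * request until each reports wait_status.  The delay between polls of a
 * node backs off by 20ms per attempt, capped at one second.  With
 * save_slots set (from dlm_recover_members_wait), the slot info carried in
 * each status reply is saved via dlm_slot_save().
 */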
static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
			   int save_slots)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct dlm_member *memb;
	int error = 0, delay;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		delay = 0;
		for (;;) {
			if (dlm_recovery_stopped(ls)) {
				error = -EINTR;
				goto out;
			}

			error = dlm_rcom_status(ls, memb->nodeid, 0);
			if (error)
				goto out;

			if (save_slots)
				dlm_slot_save(ls, rc, memb);

			if (rc->rc_result & wait_status)
				break;
			if (delay < 1000)
				delay += 20;
			msleep(delay);
		}
	}
 out:
	return error;
}

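/*
 * Used by everyone except the low nodeid: poll the low nodeid until it
 * reports wait_status (normally one of the accumulated X_ALL flags),
 * backing off between polls the same way as wait_status_all().
 * status_flags (e.g. DLM_RSF_NEED_SLOTS) is passed through in the rcom
 * status request.
 */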
static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
			   uint32_t status_flags)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

	for (;;) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			goto out;
		}

		error = dlm_rcom_status(ls, nodeid, status_flags);
		if (error)
			break;

		if (rc->rc_result & wait_status)
			break;
		if (delay < 1000)
			delay += 20;
		msleep(delay);
	}
 out:
	return error;
}

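/*
 * Barrier used by the directory, lock and done phases below: the low nodeid
 * waits for everyone to report 'status' and then publishes status_all,
 * while every other node waits for status_all on the low nodeid.  The
 * 'status << 1' trick relies on each DLM_RS_*_ALL flag being defined as the
 * bit directly above the corresponding DLM_RS_* flag (see dlm_internal.h).
 */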
static int wait_status(struct dlm_ls *ls, uint32_t status)
{
	uint32_t status_all = status << 1;
	int error;

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, status, 0);
		if (!error)
			dlm_set_recover_status(ls, status_all);
	} else
		error = wait_status_low(ls, status_all, 0);

	return error;
}

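/*
 * Membership barrier with slot assignment: every node clears its saved slot
 * info, then the low nodeid collects DLM_RS_NODES (and slot info) from all
 * members, assigns slots and a generation with dlm_slots_assign(), and
 * publishes DLM_RS_NODES_ALL.  The other nodes wait for DLM_RS_NODES_ALL on
 * the low nodeid (asking for slot data with DLM_RSF_NEED_SLOTS) and then
 * copy the assigned slots in with dlm_slots_copy_in().
 */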
int dlm_recover_members_wait(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int num_slots, slots_size;
	int error, rv;
	uint32_t gen;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		memb->slot = -1;
		memb->generation = 0;
	}

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, DLM_RS_NODES, 1);
		if (error)
			goto out;

		/* slots array is sparse, slots_size may be > num_slots */

		rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
		if (!rv) {
			spin_lock(&ls->ls_recover_lock);
			_set_recover_status(ls, DLM_RS_NODES_ALL);
			ls->ls_num_slots = num_slots;
			ls->ls_slots_size = slots_size;
			ls->ls_slots = slots;
			ls->ls_generation = gen;
			spin_unlock(&ls->ls_recover_lock);
		} else {
			dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
		}
	} else {
		error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
		if (error)
			goto out;

		dlm_slots_copy_in(ls);
	}
 out:
	return error;
}

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

static int recover_list_empty(struct dlm_ls *ls)
{
	int empty;

	spin_lock(&ls->ls_recover_list_lock);
	empty = list_empty(&ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);

	return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	if (list_empty(&r->res_recover_list)) {
		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
		ls->ls_recover_list_count++;
		dlm_hold_rsb(r);
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	list_del_init(&r->res_recover_list);
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_list_lock);

	dlm_put_rsb(r);
}

static void recover_list_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *s;

	spin_lock(&ls->ls_recover_list_lock);
	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
		list_del_init(&r->res_recover_list);
		r->res_recover_locks_count = 0;
		dlm_put_rsb(r);
		ls->ls_recover_list_count--;
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

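/*
 * The recover_idr helpers play the same role as the recover_list helpers
 * above, but for the master-lookup phase: each rsb waiting for a lookup
 * reply gets a small integer id (stored in res_id) so the reply's rc_id can
 * be matched back to its rsb.  They share ls_recover_list_count with the
 * list-based helpers, and an rsb is held while it sits in the idr.
 */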
static int recover_idr_empty(struct dlm_ls *ls)
{
	int empty = 1;

	spin_lock(&ls->ls_recover_idr_lock);
	if (ls->ls_recover_list_count)
		empty = 0;
	spin_unlock(&ls->ls_recover_idr_lock);

	return empty;
}

static int recover_idr_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	int rv;

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_recover_idr_lock);
	if (r->res_id) {
		rv = -1;
		goto out_unlock;
	}
	rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
	if (rv < 0)
		goto out_unlock;

	r->res_id = rv;
	ls->ls_recover_list_count++;
	dlm_hold_rsb(r);
	rv = 0;
out_unlock:
	spin_unlock(&ls->ls_recover_idr_lock);
	idr_preload_end();
	return rv;
}

static void recover_idr_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_idr_lock);
	idr_remove(&ls->ls_recover_idr, r->res_id);
	r->res_id = 0;
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_idr_lock);

	dlm_put_rsb(r);
}

static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
{
	struct dlm_rsb *r;

	spin_lock(&ls->ls_recover_idr_lock);
	r = idr_find(&ls->ls_recover_idr, (int)id);
	spin_unlock(&ls->ls_recover_idr_lock);
	return r;
}

static void recover_idr_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int id;

	spin_lock(&ls->ls_recover_idr_lock);

	idr_for_each_entry(&ls->ls_recover_idr, r, id) {
		idr_remove(&ls->ls_recover_idr, id);
		r->res_id = 0;
		r->res_recover_locks_count = 0;
		ls->ls_recover_list_count--;

		dlm_put_rsb(r);
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_idr_lock);
}

/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue.
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, queue, lkb_statequeue) {
		if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) {
			lkb->lkb_nodeid = nodeid;
			lkb->lkb_remid = 0;
		}
	}
}

static void set_master_lkbs(struct dlm_rsb *r)
{
	set_lock_master(&r->res_grantqueue, r->res_nodeid);
	set_lock_master(&r->res_convertqueue, r->res_nodeid);
	set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks.
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r)
{
	set_master_lkbs(r);
	rsb_set_flag(r, RSB_NEW_MASTER);
	rsb_set_flag(r, RSB_NEW_MASTER2);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 *
 * Another node recovering the master may have sent us an rcom lookup,
 * and our dlm_master_lookup() set it as the new master, along with
 * NEW_MASTER so that we'll recover it here (this implies dir_nodeid
 * equals our_nodeid below).
 */

static int recover_master(struct dlm_rsb *r, unsigned int *count)
{
	struct dlm_ls *ls = r->res_ls;
	int our_nodeid, dir_nodeid;
	int is_removed = 0;
	int error;

	if (is_master(r))
		return 0;

	is_removed = dlm_is_removed(ls, r->res_nodeid);

	if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
		return 0;

	our_nodeid = dlm_our_nodeid();
	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid == our_nodeid) {
		if (is_removed) {
			r->res_master_nodeid = our_nodeid;
			r->res_nodeid = 0;
		}

		/* set master of lkbs to ourself when is_removed, or to
		   another new master which we set along with NEW_MASTER
		   in dlm_master_lookup */
		set_new_master(r);
		error = 0;
	} else {
		recover_idr_add(r);
		error = dlm_send_rcom_lookup(r, dir_nodeid);
	}

	(*count)++;
	return error;
}

/*
 * All MSTCPY locks are purged and rebuilt, even if the master stayed the same.
 * This is necessary because recovery can be started, aborted and restarted,
 * causing the master nodeid to briefly change during the aborted recovery, and
 * change back to the original value in the second recovery.  The MSTCPY locks
 * may or may not have been purged during the aborted recovery.  Another node
 * with an outstanding request in the waiters list and a request reply saved in
 * the requestqueue cannot know whether it should ignore the reply and resend
 * the request, or accept the reply and complete the request.  It must do the
 * former if the remote node purged MSTCPY locks, and it must do the latter if
 * the remote node did not.  This is solved by always purging MSTCPY locks, in
 * which case, the request reply would always be ignored and the request
 * resent.
 */

static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
{
	int dir_nodeid = dlm_dir_nodeid(r);
	int new_master = dir_nodeid;

	if (dir_nodeid == dlm_our_nodeid())
		new_master = 0;

	dlm_purge_mstcpy_locks(r);
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	(*count)++;
	return 0;
}

/*
 * Go through local root resources and for each rsb whose master has
 * departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int total = 0;
	unsigned int count = 0;
	int nodir = dlm_no_directory(ls);
	int error;

	log_rinfo(ls, "dlm_recover_masters");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (dlm_recovery_stopped(ls)) {
			up_read(&ls->ls_root_sem);
			error = -EINTR;
			goto out;
		}

		lock_rsb(r);
		if (nodir)
			error = recover_master_static(r, &count);
		else
			error = recover_master(r, &count);
		unlock_rsb(r);
		cond_resched();
		total++;

		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);

	error = dlm_wait_function(ls, &recover_idr_empty);
 out:
	if (error)
		recover_idr_clear(ls);
	return error;
}

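/*
 * Called when a lookup reply (receive_rcom_lookup_reply) arrives: match the
 * reply to its rsb via the recover idr, record the master nodeid returned
 * in rc_result (res_nodeid is 0 when we are the master ourselves), and wake
 * dlm_recover_masters() once the last outstanding lookup has been answered.
 */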
int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_rsb *r;
	int ret_nodeid, new_master;

	r = recover_idr_find(ls, rc->rc_id);
	if (!r) {
		log_error(ls, "dlm_recover_master_reply no id %llx",
			  (unsigned long long)rc->rc_id);
		goto out;
	}

	ret_nodeid = rc->rc_result;

	if (ret_nodeid == dlm_our_nodeid())
		new_master = 0;
	else
		new_master = ret_nodeid;

	lock_rsb(r);
	r->res_master_nodeid = ret_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	unlock_rsb(r);
	recover_idr_del(r);

	if (recover_idr_empty(ls))
		wake_up(&ls->ls_wait_general);
 out:
	return 0;
}


/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/


/*
 * keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies then recovery for the rsb is done
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
	struct dlm_lkb *lkb;
	int error = 0;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		error = dlm_send_rcom_lock(r, lkb);
		if (error)
			break;
		r->res_recover_locks_count++;
	}

	return error;
}

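/*
 * Send an rcom lock message for every lkb on the rsb's grant, convert and
 * wait queues, counting how many were sent in res_recover_locks_count.  If
 * any were sent, park the rsb on the recover_list until the matching
 * replies have all come back (see dlm_recovered_lock below); otherwise the
 * rsb needs nothing more and NEW_MASTER is cleared right away.
 */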
static int recover_locks(struct dlm_rsb *r)
{
	int error = 0;

	lock_rsb(r);

	DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

	error = recover_locks_queue(r, &r->res_grantqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_convertqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_waitqueue);
	if (error)
		goto out;

	if (r->res_recover_locks_count)
		recover_list_add(r);
	else
		rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
	unlock_rsb(r);
	return error;
}

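/*
 * For every root rsb that got a new master (RSB_NEW_MASTER) and that we do
 * not master ourselves, resend our process-copy locks to the new master,
 * then wait (via dlm_wait_function) until every reply has cleared the
 * recover_list.  On error or abort the list is cleared so nothing is left
 * holding rsb references.
 */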
int dlm_recover_locks(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error, count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (is_master(r)) {
			rsb_clear_flag(r, RSB_NEW_MASTER);
			continue;
		}

		if (!rsb_flag(r, RSB_NEW_MASTER))
			continue;

		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			up_read(&ls->ls_root_sem);
			goto out;
		}

		error = recover_locks(r);
		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}

		count += r->res_recover_locks_count;
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_locks %d out", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

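/*
 * Called as each rcom lock reply is processed for an rsb on the
 * recover_list.  When the reply count reaches the number of lkb's we sent,
 * the rsb is fully remastered: drop it from the list, clear NEW_MASTER, and
 * wake the waiter in dlm_recover_locks() once the list is empty.
 */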
void dlm_recovered_lock(struct dlm_rsb *r)
{
	DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

	r->res_recover_locks_count--;
	if (!r->res_recover_locks_count) {
		rsb_clear_flag(r, RSB_NEW_MASTER);
		recover_list_del(r);
	}

	if (recover_list_empty(r->res_ls))
		wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set in two cases:
 *
 * 1. we are master, but not new, and we purged an EX/PW lock held by a
 * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
 *
 * 2. we are a new master, and there are only NL/CR locks left.
 * (We could probably improve this by only invalidating in this way when
 * the previous master left uncleanly.  VMS docs mention that.)
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */

static void recover_lvb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *high_lkb = NULL;
	uint32_t high_seq = 0;
	int lock_lvb_exists = 0;
	int big_lock_exists = 0;
	int lvblen = r->res_ls->ls_lvblen;

	if (!rsb_flag(r, RSB_NEW_MASTER2) &&
	    rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
		/* case 1 above */
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!rsb_flag(r, RSB_NEW_MASTER2))
		return;

	/* we are the new master, so figure out if VALNOTVALID should
	   be set, and set the rsb lvb from the best lkb available. */

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

 setflag:
	if (!lock_lvb_exists)
		goto out;

	/* lvb is invalidated if only NL/CR locks remain */
	if (!big_lock_exists)
		rsb_set_flag(r, RSB_VALNOTVALID);

	if (!r->res_lvbptr) {
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
		if (!r->res_lvbptr)
			goto out;
	}

	if (big_lock_exists) {
		r->res_lvbseq = lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
	} else if (high_lkb) {
		r->res_lvbseq = high_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
	} else {
		r->res_lvbseq = 0;
		memset(r->res_lvbptr, 0, lvblen);
	}
 out:
	return;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /* All master rsbs flagged RECOVER_CONVERT need to be looked at.  Locks that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)    were converting PR->CW or CW->PR need a valid lkb_grmode set again. */
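
/* Only locks recovered with lkb_grmode == DLM_LOCK_IV, i.e. those caught in
   the middle of a PR<->CW conversion, are touched below.  For example, a
   lock that held PR and was converting to CW has its grmode set back to the
   PR/CW mode still found on the grant queue; if no such lock remains
   granted, its requested mode is used instead. */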
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static void recover_conversion(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	struct dlm_ls *ls = r->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 	struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	int grmode = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 		if (lkb->lkb_grmode == DLM_LOCK_PR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 		    lkb->lkb_grmode == DLM_LOCK_CW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 			grmode = lkb->lkb_grmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 
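	/* grmode now holds a PR or CW mode still granted on this rsb,
	   or -1 if no such lock remains */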
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 		if (lkb->lkb_grmode != DLM_LOCK_IV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 		if (grmode == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 			log_debug(ls, "recover_conversion %x set gr to rq %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 				  lkb->lkb_id, lkb->lkb_rqmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 			lkb->lkb_grmode = lkb->lkb_rqmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 			log_debug(ls, "recover_conversion %x set gr %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 				  lkb->lkb_id, grmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 			lkb->lkb_grmode = grmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* We've become the new master for this rsb; waiting/converting locks may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)    now be grantable in dlm_recover_grant() because locks that were held by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)    a removed node no longer block them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) static void recover_grant(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 		rsb_set_flag(r, RSB_RECOVER_GRANT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) void dlm_recover_rsbs(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 	unsigned int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 	down_read(&ls->ls_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 		lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 		if (is_master(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 			if (rsb_flag(r, RSB_RECOVER_CONVERT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 				recover_conversion(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 			/* recover the lvb before granting locks so the updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 			   lvb/VALNOTVALID state is returned in the completion ast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 			recover_lvb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 			if (rsb_flag(r, RSB_NEW_MASTER2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 				recover_grant(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 		} else {
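			/* not the master: lvb validity is decided by the
			   master, so drop any stale local VALNOTVALID */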
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 			rsb_clear_flag(r, RSB_VALNOTVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 		rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 		rsb_clear_flag(r, RSB_NEW_MASTER2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 		unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 	up_read(&ls->ls_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 	if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 		log_rinfo(ls, "dlm_recover_rsbs %d done", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* Create a single list of all root rsbs to be used during recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) int dlm_create_root_list(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 	struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 	struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 	int i, error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 	down_write(&ls->ls_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 	if (!list_empty(&ls->ls_root_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 		log_error(ls, "root list not empty");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 		error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 
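	/* walk every bucket of the rsb hash table and put each kept rsb on
	   the root list, taking a reference that dlm_release_root_list()
	   later drops */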
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 		spin_lock(&ls->ls_rsbtbl[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 			r = rb_entry(n, struct dlm_rsb, res_hashnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 			list_add(&r->res_root_list, &ls->ls_root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 			dlm_hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 		if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 			log_error(ls, "dlm_create_root_list toss not empty");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 		spin_unlock(&ls->ls_rsbtbl[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 	up_write(&ls->ls_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) void dlm_release_root_list(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 	struct dlm_rsb *r, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 	down_write(&ls->ls_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 	list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 		list_del_init(&r->res_root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 		dlm_put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 	up_write(&ls->ls_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 
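/* Free all rsbs left on the toss lists (unused rsbs awaiting removal) */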
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) void dlm_clear_toss(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 	struct rb_node *n, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 	struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) 	unsigned int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) 		spin_lock(&ls->ls_rsbtbl[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) 		for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) 			next = rb_next(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) 			r = rb_entry(n, struct dlm_rsb, res_hashnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) 			rb_erase(n, &ls->ls_rsbtbl[i].toss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) 			dlm_free_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) 		spin_unlock(&ls->ls_rsbtbl[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) 	if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) 		log_rinfo(ls, "dlm_clear_toss %u done", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)