Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

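fs/ocfs2/dlm/dlmthread.c (every line below was last modified by commit 8f3ce5b39, kx, 2023-10-28):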
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "../cluster/masklog.h"

static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);

/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		spin_unlock(&res->spinlock);
		schedule();
		spin_lock(&res->spinlock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	__set_current_state(TASK_RUNNING);
}

int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		return 0;
	return 1;
}

/* "unused": the lockres has no locks, is not on the dirty list,
 * has no inflight locks (in the gap between mastery and acquiring
 * the first lock), and has no bits in its refmap.
 * truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
	int bit;

	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_has_locks(res))
		return 0;

	/* Locks are in the process of being created */
	if (res->inflight_locks)
		return 0;

	if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
		return 0;

	if (res->state & (DLM_LOCK_RES_RECOVERING|
			DLM_LOCK_RES_RECOVERY_WAITING))
		return 0;

	/* Another node has this resource with this node as the master */
	bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
	if (bit < O2NM_MAX_NODES)
		return 0;

	return 1;
}


/* Call whenever you may have added or deleted something from one of
 * the lockres queues. This will figure out whether it belongs on the
 * unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_unused(res)){
		if (list_empty(&res->purge)) {
			mlog(0, "%s: Adding res %.*s to purge list\n",
			     dlm->name, res->lockname.len, res->lockname.name);

			res->last_used = jiffies;
			dlm_lockres_get(res);
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purge list\n",
		     dlm->name, res->lockname.len, res->lockname.name);

		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
}

void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}

/*
 * Do the real purge work:
 *     unhash the lockres, and
 *     clear flag DLM_LOCK_RES_DROPPING_REF.
 * It requires dlm and lockres spinlock to be taken.
 */
void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
		struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	/*
	 * lockres is not in the hash now. drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource.
	 */
	res->state &= ~DLM_LOCK_RES_DROPPING_REF;
}

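/*
 * Purge one unused lockres: for a resource this node does not master,
 * first drop our ref on the master node (the spinlocks are released
 * around that network call), then remove the resource from the purge
 * list and, unless the master is still processing the deref, unhash it.
 * Called with dlm->spinlock and res->spinlock held; returns with
 * res->spinlock released and dlm->spinlock still held.
 */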
static void dlm_purge_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	master = (res->owner == dlm->node_num);

	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, master);

	if (!master) {
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",
				dlm->name, res->lockname.len, res->lockname.name);
			spin_unlock(&res->spinlock);
			return;
		}

		res->state |= DLM_LOCK_RES_DROPPING_REF;
		/* drop spinlock...  retake below */
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);

		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			if (!dlm_is_host_down(ret))
				BUG();
		}
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!master && ret == DLM_DEREF_RESPONSE_INPROG) {
		mlog(0, "%s: deref %.*s in progress\n",
			dlm->name, res->lockname.len, res->lockname.name);
		spin_unlock(&res->spinlock);
		return;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
				res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	} else
		spin_unlock(&res->spinlock);
}

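/*
 * Walk the purge list and purge every lockres that has sat unused for
 * at least DLM_PURGE_INTERVAL_MS.  When purge_now is set (domain
 * shutdown) the age check is skipped.  Resources that are back in use,
 * migrating, or that still have assert-master work pending are moved
 * to the tail of the list and retried later.
 */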
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
			       int purge_now)
{
	unsigned int run_max, unused;
	unsigned long purge_jiffies;
	struct dlm_lock_resource *lockres;

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;

	while(run_max && !list_empty(&dlm->purge_list)) {
		run_max--;

		lockres = list_entry(dlm->purge_list.next,
				     struct dlm_lock_resource, purge);

		spin_lock(&lockres->spinlock);

		purge_jiffies = lockres->last_used +
			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

		/* Make sure that we want to be processing this guy at
		 * this time. */
		if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anything added after
			 * it will have a greater last_used value */
			spin_unlock(&lockres->spinlock);
			break;
		}

		/* Status of the lockres *might* change so double
		 * check. If the lockres is unused, holding the dlm
		 * spinlock will prevent people from getting any more
		 * refs on it. */
		unused = __dlm_lockres_unused(lockres);
		if (!unused ||
		    (lockres->state & DLM_LOCK_RES_MIGRATING) ||
		    (lockres->inflight_assert_workers != 0)) {
			mlog(0, "%s: res %.*s is in use or being remastered, "
			     "used %d, state %d, assert master workers %u\n",
			     dlm->name, lockres->lockname.len,
			     lockres->lockname.name,
			     !unused, lockres->state,
			     lockres->inflight_assert_workers);
			list_move_tail(&lockres->purge, &dlm->purge_list);
			spin_unlock(&lockres->spinlock);
			continue;
		}

		dlm_lockres_get(lockres);

		dlm_purge_lockres(dlm, lockres);

		dlm_lockres_put(lockres);

		/* Avoid adding any scheduling latencies */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
}

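/*
 * Walk the convert and blocked queues of a lockres this node masters
 * and grant whatever the currently held locks allow: the head of the
 * converting queue is tried first, then the head of the blocked queue
 * (which can only be granted once the converting queue is empty).
 * Holks that block the target get a BAST queued; a granted lock is
 * moved to the granted list and gets an AST queued.
 */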
static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	struct dlm_lock *lock, *target;
	int can_grant = 1;

	/*
	 * Because this function is called with the lockres spinlock
	 * held, and because we know that the lockres is not migrating/
	 * recovering/in-progress, it is safe to reserve asts and basts
	 * right before queueing them throughout this function.
	 */
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&res->spinlock);
	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
			      DLM_LOCK_RES_RECOVERING|
			      DLM_LOCK_RES_IN_PROGRESS)));

converting:
	if (list_empty(&res->converting))
		goto blocked;
	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
	     res->lockname.len, res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		BUG();
	}
	list_for_each_entry(lock, &res->granted, list) {
		if (lock==target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			/* queue the BAST if not already */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			/* update the highest_blocked if needed */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	list_for_each_entry(lock, &res->converting, list) {
		if (lock==target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	/* we can convert the lock */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

blocked:
	if (list_empty(&res->blocked))
		goto leave;
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	list_for_each_entry(lock, &res->granted, list) {
		if (lock==target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	list_for_each_entry(lock, &res->converting, list) {
		if (lock==target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	/* we can grant the blocked lock (only
	 * possible if converting list empty) */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type, target->ml.node);

		/* target->ml.type is already correct */
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

leave:
	return;
}

/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	if (res) {
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		__dlm_dirty_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	}
	wake_up(&dlm->dlm_thread_wq);
}

void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}

	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
	     res->lockname.name);
}


/* Launch the dlm thread for this domain */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "Starting dlm_thread...\n");

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm-%s",
			dlm->name);
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_thread_task) {
		mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}
}

static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
	int empty;

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

	return empty;
}

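/*
 * Deliver all pending ASTs, then all pending BASTs: locks owned by a
 * remote node get a proxy message sent to that node, local locks get
 * their callback run directly, and the ast reservation taken when the
 * AST/BAST was queued is released after each delivery.
 */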
static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
	int ret;
	struct dlm_lock *lock;
	struct dlm_lock_resource *res;
	u8 hi;

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
				  struct dlm_lock, ast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;
		mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.type, lock->ml.node);

		BUG_ON(!lock->ast_pending);

		/* remove from list (including ref) */
		list_del_init(&lock->ast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_do_remote_ast(dlm, res, lock);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_ast(dlm, res, lock);

		spin_lock(&dlm->ast_lock);

		/* possible that another ast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			mlog(0, "%s: res %.*s, AST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}

	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
				  struct dlm_lock, bast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);

		/* get the highest blocked lock, and reset */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);

		/* remove from list (including ref) */
		list_del_init(&lock->bast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
		     "blocked %d, node %u\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     hi, lock->ml.node);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_bast(dlm, res, lock, hi);

		spin_lock(&dlm->ast_lock);

		/* possible that another bast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			mlog(0, "%s: res %.*s, BAST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}


#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY  100

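/*
 * Main loop of the per-domain "dlm-<name>" kernel thread: run the
 * purge list, pull each lockres off the dirty list and shuffle its
 * queues (queueing ASTs/BASTs as needed), flush those ASTs/BASTs,
 * then sleep until kicked or the timeout expires.
 */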
static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run.  So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP.  Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists.  */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list,
			 * unset the dirty flag and drop the dlm lock */
			BUG_ON(!res);
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&dlm->ast_lock);
			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
				     " dirty %d\n", dlm->name,
				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
				     !!(res->state & DLM_LOCK_RES_MIGRATING),
				     !!(res->state & DLM_LOCK_RES_RECOVERING),
				     !!(res->state & DLM_LOCK_RES_DIRTY));
			}
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING |
					  DLM_LOCK_RES_RECOVERY_WAITING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->ast_lock);
				mlog(0, "%s: res %.*s, inprogress, delay list "
				     "shuffle, state %d\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress.  we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->ast_lock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "%s: Throttling dlm thread\n",
				     dlm->name);
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		if (!n) {
			cond_resched();
			continue;
		}

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}