Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /* -*- mode: c; c-basic-offset: 8; -*-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * vim: noexpandtab sw=8 ts=8 sts=0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * userdlm.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Code which implements the kernel side of a minimal userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * interface to our DLM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * Many of the functions here are pared down versions of dlmglue.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include "../ocfs2_lockingver.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include "../stackglue.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include "userdlm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #define MLOG_MASK_PREFIX ML_DLMFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include "../cluster/masklog.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	return container_of(lksb, struct user_lock_res, l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) static inline int user_check_wait_flag(struct user_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 				       int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	spin_lock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	ret = lockres->l_flags & flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) static inline void user_wait_on_busy_lock(struct user_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	wait_event(lockres->l_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 		   !user_check_wait_flag(lockres, USER_LOCK_BUSY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	wait_event(lockres->l_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 		   !user_check_wait_flag(lockres, USER_LOCK_BLOCKED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) /* I heart container_of... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) static inline struct ocfs2_cluster_connection *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) cluster_connection_from_user_lockres(struct user_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	struct dlmfs_inode_private *ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	ip = container_of(lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 			  struct dlmfs_inode_private,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 			  ip_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	return ip->ip_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) static struct inode *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) user_dlm_inode_from_user_lockres(struct user_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	struct dlmfs_inode_private *ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	ip = container_of(lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 			  struct dlmfs_inode_private,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 			  ip_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	return &ip->ip_vfs_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
/* A dlm call failed after we set USER_LOCK_BUSY; clear the busy flag
 * so waiters on l_event can make progress again. */
static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
{
	spin_lock(&lockres->l_lock);
	lockres->l_flags &= ~USER_LOCK_BUSY;
	spin_unlock(&lockres->l_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
/* Log a dlm error status together with the lockres name.  l_name is
 * not NUL-terminated, hence the %.*s/length pair. */
#define user_log_dlm_error(_func, _stat, _lockres) do {			\
	mlog(ML_ERROR, "Dlm error %d while calling %s on "		\
		"resource %.*s\n", _stat, _func,			\
		_lockres->l_namelen, _lockres->l_name); 		\
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) /* WARNING: This function lives in a world where the only three lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  * levels are EX, PR, and NL. It *will* have to be adjusted when more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  * lock types are added. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) static inline int user_highest_compat_lock_level(int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	int new_level = DLM_LOCK_EX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	if (level == DLM_LOCK_EX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		new_level = DLM_LOCK_NL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	else if (level == DLM_LOCK_PR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		new_level = DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	return new_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
/*
 * Lock AST: called by the dlm stack when a lock or convert request we
 * issued has completed.  Updates the cached lock state to the granted
 * level and wakes anyone waiting on the lockres.
 */
static void user_ast(struct ocfs2_dlm_lksb *lksb)
{
	struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
	int status;

	mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",
	     lockres->l_namelen, lockres->l_name, lockres->l_level,
	     lockres->l_requested);

	spin_lock(&lockres->l_lock);

	status = ocfs2_dlm_lock_status(&lockres->l_lksb);
	if (status) {
		/* The dlm reported an error; leave the lockres state
		 * untouched (USER_LOCK_BUSY stays set). */
		mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
		     status, lockres->l_namelen, lockres->l_name);
		spin_unlock(&lockres->l_lock);
		return;
	}

	mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
			"Lockres %.*s, requested ivmode. flags 0x%x\n",
			lockres->l_namelen, lockres->l_name, lockres->l_flags);

	/* we're downconverting. */
	if (lockres->l_requested < lockres->l_level) {
		if (lockres->l_requested <=
		    user_highest_compat_lock_level(lockres->l_blocking)) {
			/* The granted level no longer conflicts with the
			 * blocking request, so we are no longer blocked. */
			lockres->l_blocking = DLM_LOCK_NL;
			lockres->l_flags &= ~USER_LOCK_BLOCKED;
		}
	}

	lockres->l_level = lockres->l_requested;
	lockres->l_requested = DLM_LOCK_IV;
	lockres->l_flags |= USER_LOCK_ATTACHED;
	lockres->l_flags &= ~USER_LOCK_BUSY;

	spin_unlock(&lockres->l_lock);

	/* Wake waiters (user_wait_on_busy_lock et al.) after dropping
	 * the spinlock. */
	wake_up(&lockres->l_event);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 
/* Pin the dlmfs inode that embeds this lockres.  igrab() failing here
 * would mean the inode was already being freed, which must not happen
 * while the lockres is live - hence the BUG(). */
static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
{
	struct inode *inode = user_dlm_inode_from_user_lockres(lockres);

	if (!igrab(inode))
		BUG();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) static void user_dlm_unblock_lock(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 		user_dlm_grab_inode_ref(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		queue_work(user_dlm_worker, &lockres->l_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 		lockres->l_flags |= USER_LOCK_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	int queue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	if (!(lockres->l_flags & USER_LOCK_BLOCKED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	switch (lockres->l_blocking) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	case DLM_LOCK_EX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 		if (!lockres->l_ex_holders && !lockres->l_ro_holders)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 			queue = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	case DLM_LOCK_PR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		if (!lockres->l_ex_holders)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 			queue = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	if (queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		__user_dlm_queue_lockres(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 
/*
 * Blocking AST: called by the dlm stack when another node requests
 * this lock at a level that conflicts with our granted level.  Marks
 * the lockres blocked and queues the unblock worker to downconvert.
 */
static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
{
	struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);

	mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",
	     lockres->l_namelen, lockres->l_name, level, lockres->l_level);

	spin_lock(&lockres->l_lock);
	lockres->l_flags |= USER_LOCK_BLOCKED;
	/* Only ever raise l_blocking - multiple basts may arrive. */
	if (level > lockres->l_blocking)
		lockres->l_blocking = level;

	__user_dlm_queue_lockres(lockres);
	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 
/*
 * Unlock AST: called by the dlm stack when an unlock or cancel
 * request completes.  Distinguishes three cases: a real unlock during
 * teardown, a cancel that lost the race with a grant, and a cancel
 * that succeeded (which may require re-queueing the unblock worker).
 */
static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
{
	struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);

	mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",
	     lockres->l_namelen, lockres->l_name, lockres->l_flags);

	if (status)
		mlog(ML_ERROR, "dlm returns status %d\n", status);

	spin_lock(&lockres->l_lock);
	/* The teardown flag gets set early during the unlock process,
	 * so test the cancel flag to make sure that this ast isn't
	 * for a concurrent cancel. */
	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
	    && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
		/* Real unlock: the lock is gone from the dlm's view. */
		lockres->l_level = DLM_LOCK_IV;
	} else if (status == DLM_CANCELGRANT) {
		/* We tried to cancel a convert request, but it was
		 * already granted. Don't clear the busy flag - the
		 * ast should've done this already. */
		BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
		lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
		goto out_noclear;
	} else {
		BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
		/* Cancel succeeded, we want to re-queue */
		lockres->l_requested = DLM_LOCK_IV; /* cancel an
						    * upconvert
						    * request. */
		lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
		/* we want the unblock thread to look at it again
		 * now. */
		if (lockres->l_flags & USER_LOCK_BLOCKED)
			__user_dlm_queue_lockres(lockres);
	}

	lockres->l_flags &= ~USER_LOCK_BUSY;
out_noclear:
	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)  * This is the userdlmfs locking protocol version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)  * See fs/ocfs2/dlmglue.c for more details on locking versions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)  */
static struct ocfs2_locking_protocol user_dlm_lproto = {
	.lp_max_version = {
		.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
		.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
	},
	.lp_lock_ast		= user_ast,	/* lock/convert completed */
	.lp_blocking_ast	= user_bast,	/* another node wants the lock */
	.lp_unlock_ast		= user_unlock_ast,	/* unlock/cancel completed */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
/* Release the inode reference taken by user_dlm_grab_inode_ref(). */
static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
{
	iput(user_dlm_inode_from_user_lockres(lockres));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 
/*
 * Worker queued by __user_dlm_queue_lockres().  Tries to resolve a
 * blocked lockres: bail out if it is no longer blocked, being torn
 * down, or already mid-cancel; cancel an in-flight request that is in
 * the way; otherwise downconvert to the highest level compatible with
 * the blocking request.  Always drops the inode reference taken when
 * the work was queued.
 */
static void user_dlm_unblock_lock(struct work_struct *work)
{
	int new_level, status;
	struct user_lock_res *lockres =
		container_of(work, struct user_lock_res, l_work);
	struct ocfs2_cluster_connection *conn =
		cluster_connection_from_user_lockres(lockres);

	mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);

	spin_lock(&lockres->l_lock);

	mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
			"Lockres %.*s, flags 0x%x\n",
			lockres->l_namelen, lockres->l_name, lockres->l_flags);

	/* notice that we don't clear USER_LOCK_BLOCKED here. If it's
	 * set, we want user_ast clear it. */
	lockres->l_flags &= ~USER_LOCK_QUEUED;

	/* It's valid to get here and no longer be blocked - if we get
	 * several basts in a row, we might be queued by the first
	 * one, the unblock thread might run and clear the queued
	 * flag, and finally we might get another bast which re-queues
	 * us before our ast for the downconvert is called. */
	if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
		mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",
		     lockres->l_namelen, lockres->l_name);
		spin_unlock(&lockres->l_lock);
		goto drop_ref;
	}

	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
		/* Teardown will unlock everything; nothing for us to do. */
		mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",
		     lockres->l_namelen, lockres->l_name);
		spin_unlock(&lockres->l_lock);
		goto drop_ref;
	}

	if (lockres->l_flags & USER_LOCK_BUSY) {
		/* A request is in flight.  If a cancel is already
		 * pending, leave it alone; otherwise issue a cancel
		 * and let user_unlock_ast re-queue us. */
		if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
			mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n",
			     lockres->l_namelen, lockres->l_name);
			spin_unlock(&lockres->l_lock);
			goto drop_ref;
		}

		lockres->l_flags |= USER_LOCK_IN_CANCEL;
		spin_unlock(&lockres->l_lock);

		status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
					  DLM_LKF_CANCEL);
		if (status)
			user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
		goto drop_ref;
	}

	/* If there are still incompat holders, we can exit safely
	 * without worrying about re-queueing this lock as that will
	 * happen on the last call to user_cluster_unlock. */
	if ((lockres->l_blocking == DLM_LOCK_EX)
	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
		spin_unlock(&lockres->l_lock);
		mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n",
		     lockres->l_namelen, lockres->l_name,
		     lockres->l_ex_holders, lockres->l_ro_holders);
		goto drop_ref;
	}

	if ((lockres->l_blocking == DLM_LOCK_PR)
	    && lockres->l_ex_holders) {
		spin_unlock(&lockres->l_lock);
		mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n",
		     lockres->l_namelen, lockres->l_name,
		     lockres->l_ex_holders);
		goto drop_ref;
	}

	/* yay, we can downconvert now. */
	new_level = user_highest_compat_lock_level(lockres->l_blocking);
	lockres->l_requested = new_level;
	lockres->l_flags |= USER_LOCK_BUSY;
	mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n",
	     lockres->l_namelen, lockres->l_name, lockres->l_level, new_level);
	spin_unlock(&lockres->l_lock);

	/* need lock downconvert request now... */
	status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
				DLM_LKF_CONVERT|DLM_LKF_VALBLK,
				lockres->l_name,
				lockres->l_namelen);
	if (status) {
		user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
		user_recover_from_dlm_error(lockres);
	}

drop_ref:
	user_dlm_drop_inode_ref(lockres);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 					int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 	switch(level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	case DLM_LOCK_EX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 		lockres->l_ex_holders++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 	case DLM_LOCK_PR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 		lockres->l_ro_holders++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) /* predict what lock level we'll be dropping down to on behalf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)  * of another node, and return true if the currently wanted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)  * level will be compatible with it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 				  int wanted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	return wanted <= user_highest_compat_lock_level(lockres->l_blocking);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) int user_dlm_cluster_lock(struct user_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 			  int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 			  int lkm_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 	int status, local_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	struct ocfs2_cluster_connection *conn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 		cluster_connection_from_user_lockres(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	if (level != DLM_LOCK_EX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	    level != DLM_LOCK_PR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 		mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 		     lockres->l_namelen, lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 		status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	     lockres->l_namelen, lockres->l_name, level, lkm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 		status = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	spin_lock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 	/* We only compare against the currently granted level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	 * here. If the lock is blocked waiting on a downconvert,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	 * we'll get caught below. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 	if ((lockres->l_flags & USER_LOCK_BUSY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	    (level > lockres->l_level)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 		/* is someone sitting in dlm_lock? If so, wait on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 		 * them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 		spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 		user_wait_on_busy_lock(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 	    (!user_may_continue_on_blocked_lock(lockres, level))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 		/* is the lock is currently blocked on behalf of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		 * another node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 		spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 		user_wait_on_blocked_lock(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	if (level > lockres->l_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 		local_flags = lkm_flags | DLM_LKF_VALBLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 		if (lockres->l_level != DLM_LOCK_IV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 			local_flags |= DLM_LKF_CONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 		lockres->l_requested = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 		lockres->l_flags |= USER_LOCK_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 		spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 		BUG_ON(level == DLM_LOCK_IV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 		BUG_ON(level == DLM_LOCK_NL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 		/* call dlm_lock to upgrade lock now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 		status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 					local_flags, lockres->l_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 					lockres->l_namelen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 		if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 			if ((lkm_flags & DLM_LKF_NOQUEUE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 			    (status != -EAGAIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 				user_log_dlm_error("ocfs2_dlm_lock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 						   status, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 			user_recover_from_dlm_error(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 			goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 		user_wait_on_busy_lock(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	user_dlm_inc_holders(lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 					int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	switch(level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	case DLM_LOCK_EX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 		BUG_ON(!lockres->l_ex_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 		lockres->l_ex_holders--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	case DLM_LOCK_PR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		BUG_ON(!lockres->l_ro_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 		lockres->l_ro_holders--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) void user_dlm_cluster_unlock(struct user_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 			     int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	if (level != DLM_LOCK_EX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	    level != DLM_LOCK_PR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 		mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 		     lockres->l_namelen, lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	spin_lock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	user_dlm_dec_holders(lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	__user_dlm_cond_queue_lockres(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) void user_dlm_write_lvb(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 			const char *val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 			unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	char *lvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	BUG_ON(len > DLM_LVB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	spin_lock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	BUG_ON(lockres->l_level < DLM_LOCK_EX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	memcpy(lvb, val, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) bool user_dlm_read_lvb(struct inode *inode, char *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	char *lvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	bool ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	spin_lock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	BUG_ON(lockres->l_level < DLM_LOCK_PR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 		memcpy(val, lvb, DLM_LVB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 		ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) void user_dlm_lock_res_init(struct user_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 			    struct dentry *dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	memset(lockres, 0, sizeof(*lockres));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	spin_lock_init(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 	init_waitqueue_head(&lockres->l_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	lockres->l_level = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	lockres->l_requested = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 	lockres->l_blocking = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	/* should have been checked before getting here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	memcpy(lockres->l_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	       dentry->d_name.name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	       dentry->d_name.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	lockres->l_namelen = dentry->d_name.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) int user_dlm_destroy_lock(struct user_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	int status = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	struct ocfs2_cluster_connection *conn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 		cluster_connection_from_user_lockres(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	spin_lock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 		spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	while (lockres->l_flags & USER_LOCK_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 		spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 		user_wait_on_busy_lock(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 		spin_lock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	if (lockres->l_ro_holders || lockres->l_ex_holders) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 		spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	lockres->l_flags &= ~USER_LOCK_ATTACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	lockres->l_flags |= USER_LOCK_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 		user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	user_wait_on_busy_lock(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) static void user_dlm_recovery_handler_noop(int node_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 					   void *recovery_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	/* We ignore recovery events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) void user_dlm_set_locking_protocol(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct ocfs2_cluster_connection *user_dlm_register(const struct qstr *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	struct ocfs2_cluster_connection *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	rc = ocfs2_cluster_connect_agnostic(name->name, name->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 					    &user_dlm_lproto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 					    user_dlm_recovery_handler_noop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 					    NULL, &conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 		mlog_errno(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	return rc ? ERR_PTR(rc) : conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) void user_dlm_unregister(struct ocfs2_cluster_connection *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	ocfs2_cluster_disconnect(conn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }