Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmglue.c
 *
 * Code which implements an OCFS2 specific interface to our DLM.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/quotaops.h>
#include <linux/sched/signal.h>

#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>

#include "ocfs2.h"
#include "ocfs2_lockingver.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "stackglue.h"
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
#include "quota.h"
#include "refcounttree.h"
#include "acl.h"

#include "buffer_head_io.h"

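/*
 * Tracks a task waiting for a lock resource's l_flags (masked by
 * mw_mask) to reach mw_goal. mw_complete is signalled once the
 * condition is met and mw_status carries the result back to the
 * waiter.
 */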
struct ocfs2_mask_waiter {
	struct list_head	mw_item;
	int			mw_status;
	struct completion	mw_complete;
	unsigned long		mw_mask;
	unsigned long		mw_goal;
#ifdef CONFIG_OCFS2_FS_STATS
	ktime_t			mw_lock_start;
#endif
};

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);

/*
 * Return value from ->downconvert_worker functions.
 *
 * These control the precise actions of ocfs2_unblock_lock()
 * and ocfs2_process_blocked_lock()
 *
 */
enum ocfs2_unblock_action {
	UNBLOCK_CONTINUE	= 0, /* Continue downconvert */
	UNBLOCK_CONTINUE_POST	= 1, /* Continue downconvert, fire
				      * ->post_unlock callback */
	UNBLOCK_STOP_POST	= 2, /* Do not downconvert, fire
				      * ->post_unlock() callback. */
};

struct ocfs2_unblock_ctl {
	int requeue;
	enum ocfs2_unblock_action unblock_action;
};

/* Lockdep class keys */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
#endif

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking);

static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking);

static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres);

static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
					    int new_level);
static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
					 int blocking);

#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)

/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
				     const char *function,
				     unsigned int line,
				     struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);

	mlog(level, "LVB information for %s (called from %s:%u):\n",
	     lockres->l_name, function, line);
	mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
	     lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
	     be32_to_cpu(lvb->lvb_igeneration));
	mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
	     (unsigned long long)be64_to_cpu(lvb->lvb_isize),
	     be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
	     be16_to_cpu(lvb->lvb_imode));
	mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
	     "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
	     (long long)be64_to_cpu(lvb->lvb_iatime_packed),
	     (long long)be64_to_cpu(lvb->lvb_ictime_packed),
	     (long long)be64_to_cpu(lvb->lvb_imtime_packed),
	     be32_to_cpu(lvb->lvb_iattr));
}


/*
 * OCFS2 Lock Resource Operations
 *
 * These fine tune the behavior of the generic dlmglue locking infrastructure.
 *
 * The most basic of lock types can point ->l_priv to their respective
 * struct ocfs2_super and allow the default actions to manage things.
 *
 * Right now, each lock type also needs to implement an init function,
 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
 * should be called when the lock is no longer needed (i.e., object
 * destruction time).
 */
struct ocfs2_lock_res_ops {
	/*
	 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
	 * this callback if ->l_priv is not an ocfs2_super pointer
	 */
	struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);

	/*
	 * Optionally called in the downconvert thread after a
	 * successful downconvert. The lockres will not be referenced
	 * after this callback is called, so it is safe to free
	 * memory, etc.
	 *
	 * The exact semantics of when this is called are controlled
	 * by ->downconvert_worker()
	 */
	void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);

	/*
	 * Allow a lock type to add checks to determine whether it is
	 * safe to downconvert a lock. Return 0 to re-queue the
	 * downconvert at a later time, nonzero to continue.
	 *
	 * For most locks, the default checks that there are no
	 * incompatible holders are sufficient.
	 *
	 * Called with the lockres spinlock held.
	 */
	int (*check_downconvert)(struct ocfs2_lock_res *, int);

	/*
	 * Allows a lock type to populate the lock value block. This
	 * is called on downconvert, and when we drop a lock.
	 *
	 * Locks that want to use this should set LOCK_TYPE_USES_LVB
	 * in the flags field.
	 *
	 * Called with the lockres spinlock held.
	 */
	void (*set_lvb)(struct ocfs2_lock_res *);

	/*
	 * Called from the downconvert thread when it is determined
	 * that a lock will be downconverted. This is called without
	 * any locks held so the function can do work that might
	 * schedule (syncing out data, etc).
	 *
	 * This should return any one of the ocfs2_unblock_action
	 * values, depending on what it wants the thread to do.
	 */
	int (*downconvert_worker)(struct ocfs2_lock_res *, int);

	/*
	 * LOCK_TYPE_* flags which describe the specific requirements
	 * of a lock type. Descriptions of each individual flag follow.
	 */
	int flags;
};

/*
 * Some locks want to "refresh" potentially stale data when a
 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
 * individual lockres l_flags member from the ast function. It is
 * expected that the locking wrapper will clear the
 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
 */
#define LOCK_TYPE_REQUIRES_REFRESH 0x1

/*
 * Indicate that a lock type makes use of the lock value block. The
 * ->set_lvb lock type callback must be defined.
 */
#define LOCK_TYPE_USES_LVB		0x2

static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.check_downconvert = ocfs2_check_meta_downconvert,
	.set_lvb	= ocfs2_set_meta_lvb,
	.downconvert_worker = ocfs2_data_convert_worker,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_super_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH,
};

static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
	.get_osb	= ocfs2_get_dentry_osb,
	.post_unlock	= ocfs2_dentry_post_unlock,
	.downconvert_worker = ocfs2_dentry_convert_worker,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
	.get_osb	= ocfs2_get_file_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
	.set_lvb	= ocfs2_set_qinfo_lvb,
	.get_osb	= ocfs2_get_qinfo_osb,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
	.check_downconvert = ocfs2_check_refcount_downconvert,
	.downconvert_worker = ocfs2_refcount_convert_worker,
	.flags		= 0,
};

static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
	return lockres->l_type == OCFS2_LOCK_TYPE_META ||
		lockres->l_type == OCFS2_LOCK_TYPE_RW ||
		lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}

static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
{
	return container_of(lksb, struct ocfs2_lock_res, l_lksb);
}

static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!ocfs2_is_inode_lock(lockres));

	return (struct inode *) lockres->l_priv;
}

static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);

	return (struct ocfs2_dentry_lock *)lockres->l_priv;
}

static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);

	return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
}

static inline struct ocfs2_refcount_tree *
ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
{
	return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
}

static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
	if (lockres->l_ops->get_osb)
		return lockres->l_ops->get_osb(lockres);

	return (struct ocfs2_super *)lockres->l_priv;
}

static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted);
static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
				   struct ocfs2_lock_res *lockres,
				   int level, unsigned long caller_ip);
static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres,
					int level)
{
	__ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
}

static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert);
#define ocfs2_log_dlm_error(_func, _err, _lockres) do {					\
	if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY)				\
		mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n",	\
		     _err, _func, _lockres->l_name);					\
	else										\
		mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n",	\
		     _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name,	\
		     (unsigned int)ocfs2_get_dentry_lock_ino(_lockres));		\
} while (0)
static int ocfs2_downconvert_thread(void *arg);
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static int ocfs2_inode_lock_update(struct inode *inode,
				  struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
					      int new_level);
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
				  struct ocfs2_lock_res *lockres,
				  int new_level,
				  int lvb,
				  unsigned int generation);
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
				        struct ocfs2_lock_res *lockres);
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres);


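/*
 * Build the textual DLM lock resource name: a one-character lock type,
 * the standard pad, the block number as 16 hex digits and the
 * generation as 8 hex digits, for OCFS2_LOCK_ID_MAX_LEN - 1 characters
 * plus the terminating NUL. Dentry locks are the exception and use
 * their own partly-binary scheme (see ocfs2_dentry_lock_res_init()).
 */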
static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
				  u64 blkno,
				  u32 generation,
				  char *name)
{
	int len;

	BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);

	len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
		       ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
		       (long long)blkno, generation);

	BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));

	mlog(0, "built lock resource with name: %s\n", name);
}

static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);

static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
				       struct ocfs2_dlm_debug *dlm_debug)
{
	mlog(0, "Add tracking for lockres %s\n", res->l_name);

	spin_lock(&ocfs2_dlm_tracking_lock);
	list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
	spin_lock(&ocfs2_dlm_tracking_lock);
	if (!list_empty(&res->l_debug_list))
		list_del_init(&res->l_debug_list);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

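/*
 * Per-lockres locking statistics, compiled in only with
 * CONFIG_OCFS2_FS_STATS. The empty stubs in the #else branch below
 * keep the callers unconditional.
 */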
#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
	res->l_lock_refresh = 0;
	res->l_lock_wait = 0;
	memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
	memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
}

static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
				    struct ocfs2_mask_waiter *mw, int ret)
{
	u32 usec;
	ktime_t kt;
	struct ocfs2_lock_stats *stats;

	if (level == LKM_PRMODE)
		stats = &res->l_lock_prmode;
	else if (level == LKM_EXMODE)
		stats = &res->l_lock_exmode;
	else
		return;

	kt = ktime_sub(ktime_get(), mw->mw_lock_start);
	usec = ktime_to_us(kt);

	stats->ls_gets++;
	stats->ls_total += ktime_to_ns(kt);
	/* overflow */
	if (unlikely(stats->ls_gets == 0)) {
		stats->ls_gets++;
		stats->ls_total = ktime_to_ns(kt);
	}

	if (stats->ls_max < usec)
		stats->ls_max = usec;

	if (ret)
		stats->ls_fail++;

	stats->ls_last = ktime_to_us(ktime_get_real());
}

static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
	lockres->l_lock_refresh++;
}

static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_mask_waiter *mw;

	if (list_empty(&lockres->l_mask_waiters)) {
		lockres->l_lock_wait = 0;
		return;
	}

	mw = list_first_entry(&lockres->l_mask_waiters,
				struct ocfs2_mask_waiter, mw_item);
	lockres->l_lock_wait =
			ktime_to_us(ktime_mono_to_real(mw->mw_lock_start));
}

static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
	mw->mw_lock_start = ktime_get();
}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
}
static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
			   int level, struct ocfs2_mask_waiter *mw, int ret)
{
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
#endif

static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *res,
				       enum ocfs2_lock_type type,
				       struct ocfs2_lock_res_ops *ops,
				       void *priv)
{
	res->l_type          = type;
	res->l_ops           = ops;
	res->l_priv          = priv;

	res->l_level         = DLM_LOCK_IV;
	res->l_requested     = DLM_LOCK_IV;
	res->l_blocking      = DLM_LOCK_IV;
	res->l_action        = OCFS2_AST_INVALID;
	res->l_unlock_action = OCFS2_UNLOCK_INVALID;

	res->l_flags         = OCFS2_LOCK_INITIALIZED;

	ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);

	ocfs2_init_lock_stats(res);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (type != OCFS2_LOCK_TYPE_OPEN)
		lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
				 &lockdep_keys[type], 0);
	else
		res->l_lockdep_map.key = NULL;
#endif
}

void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
	/* This also clears out the lock status block */
	memset(res, 0, sizeof(struct ocfs2_lock_res));
	spin_lock_init(&res->l_lock);
	init_waitqueue_head(&res->l_event);
	INIT_LIST_HEAD(&res->l_blocked_list);
	INIT_LIST_HEAD(&res->l_mask_waiters);
	INIT_LIST_HEAD(&res->l_holders);
}

void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
			       enum ocfs2_lock_type type,
			       unsigned int generation,
			       struct inode *inode)
{
	struct ocfs2_lock_res_ops *ops;

	switch(type) {
		case OCFS2_LOCK_TYPE_RW:
			ops = &ocfs2_inode_rw_lops;
			break;
		case OCFS2_LOCK_TYPE_META:
			ops = &ocfs2_inode_inode_lops;
			break;
		case OCFS2_LOCK_TYPE_OPEN:
			ops = &ocfs2_inode_open_lops;
			break;
		default:
			mlog_bug_on_msg(1, "type: %d\n", type);
			ops = NULL; /* thanks, gcc */
			break;
	}

	ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
			      generation, res->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
}

static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	return OCFS2_SB(inode->i_sb);
}

static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_mem_dqinfo *info = lockres->l_priv;

	return OCFS2_SB(info->dqi_gi.dqi_sb);
}

static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_file_private *fp = lockres->l_priv;

	return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
}

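/*
 * Dentry lock names embed the inode block number as raw big-endian
 * bytes rather than hex text (see ocfs2_dentry_lock_res_init() below);
 * this pulls it back out of the lock name.
 */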
static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
	__be64 inode_blkno_be;

	memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
	       sizeof(__be64));

	return be64_to_cpu(inode_blkno_be);
}

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = lockres->l_priv;

	return OCFS2_SB(dl->dl_inode->i_sb);
}

void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
				u64 parent, struct inode *inode)
{
	int len;
	u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
	__be64 inode_blkno_be = cpu_to_be64(inode_blkno);
	struct ocfs2_lock_res *lockres = &dl->dl_lockres;

	ocfs2_lock_res_init_once(lockres);

	/*
	 * Unfortunately, the standard lock naming scheme won't work
	 * here because we have two 16 byte values to use. Instead,
	 * we'll stuff the inode number as a binary value. We still
	 * want error prints to show something without garbling the
	 * display, so drop a null byte in there before the inode
	 * number. A future version of OCFS2 will likely use all
	 * binary lock names. The stringified names have been a
	 * tremendous aid in debugging, but now that the debugfs
	 * interface exists, we can mangle things there if need be.
	 *
	 * NOTE: We also drop the standard "pad" value (the total lock
	 * name size stays the same though - the last part is all
	 * zeros due to the memset in ocfs2_lock_res_init_once()
	 */
	len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
		       "%c%016llx",
		       ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
		       (long long)parent);

	BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));

	memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
	       sizeof(__be64));

	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
				   dl);
}

static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
				      struct ocfs2_super *osb)
{
	/* Superblock lockres doesn't come from a slab so we call init
	 * once on it manually.  */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
			      0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
				   &ocfs2_super_lops, osb);
}

static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
				       struct ocfs2_super *osb)
{
	/* Rename lockres doesn't come from a slab so we call init
	 * once on it manually.  */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
				   &ocfs2_rename_lops, osb);
}

static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
					 struct ocfs2_super *osb)
{
	/* nfs_sync lockres doesn't come from a slab so we call init
	 * once on it manually.  */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
				   &ocfs2_nfs_sync_lops, osb);
}

static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
{
	ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
	init_rwsem(&osb->nfs_sync_rwlock);
}

void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;

	/* Only one trimfs thread is allowed to work at the same time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	mutex_lock(&osb->obs_trim_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	ocfs2_lock_res_init_once(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_TRIM_FS, 0, 0, lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_TRIM_FS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 				   &ocfs2_trim_fs_lops, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	ocfs2_simple_drop_lockres(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	ocfs2_lock_res_free(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	mutex_unlock(&osb->obs_trim_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 					    struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	ocfs2_lock_res_init_once(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 				   &ocfs2_orphan_scan_lops, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 			      struct ocfs2_file_private *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	struct inode *inode = fp->fp_file->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	ocfs2_lock_res_init_once(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			      inode->i_generation, lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 				   OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 				   fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	lockres->l_flags |= OCFS2_LOCK_NOCACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 			       struct ocfs2_mem_dqinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	ocfs2_lock_res_init_once(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 			      0, lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 				   OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 				   info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 				  struct ocfs2_super *osb, u64 ref_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 				  unsigned int generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	ocfs2_lock_res_init_once(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 			      generation, lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 				   &ocfs2_refcount_block_lops, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
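/*
 * Tear down a lock resource.  The checks below insist that the lockres is
 * completely idle (not blocked, no mask waiters, no RO/EX holders) before
 * its DLM lock status block and flags are reset.
 */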
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	ocfs2_remove_lockres_tracking(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			"Lockres %s is on the blocked list\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			res->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			"Lockres %s has mask waiters pending\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			res->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	mlog_bug_on_msg(spin_is_locked(&res->l_lock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			"Lockres %s is locked\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			res->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	mlog_bug_on_msg(res->l_ro_holders,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			"Lockres %s has %u ro holders\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			res->l_name, res->l_ro_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	mlog_bug_on_msg(res->l_ex_holders,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 			"Lockres %s has %u ex holders\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			res->l_name, res->l_ex_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	/* Need to clear out the lock status block for the dlm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	memset(&res->l_lksb, 0, sizeof(res->l_lksb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	res->l_flags = 0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  * Keep a list of processes that have an interest in a lockres.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  * Note: this is now only used for checking recursive cluster locking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 				   struct ocfs2_lock_holder *oh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	INIT_LIST_HEAD(&oh->oh_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	oh->oh_owner_pid = get_pid(task_pid(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	spin_lock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	list_add_tail(&oh->oh_list, &lockres->l_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) static struct ocfs2_lock_holder *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) ocfs2_pid_holder(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		struct pid *pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	struct ocfs2_lock_holder *oh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	spin_lock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	list_for_each_entry(oh, &lockres->l_holders, oh_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		if (oh->oh_owner_pid == pid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			return oh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 				       struct ocfs2_lock_holder *oh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	spin_lock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	list_del(&oh->oh_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	spin_unlock(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	put_pid(oh->oh_owner_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
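/*
 * Rough usage sketch for the holder helpers above (the real call sites are
 * not shown here); it only illustrates the intended add/lookup/remove
 * pairing used to detect recursive cluster locking by one process:
 *
 *	struct ocfs2_lock_holder oh;
 *
 *	if (!ocfs2_pid_holder(lockres, task_pid(current))) {
 *		ocfs2_add_holder(lockres, &oh);
 *		... take and use the cluster lock ...
 *		ocfs2_remove_holder(lockres, &oh);
 *	}
 */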
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
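/*
 * Local holder counting: these track how many users on this node hold the
 * lock at EX vs. PR.  Callers are expected to serialize the updates,
 * typically under lockres->l_lock in the cluster lock/unlock paths.
 */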
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 				     int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	BUG_ON(!lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	switch(level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	case DLM_LOCK_EX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		lockres->l_ex_holders++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	case DLM_LOCK_PR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		lockres->l_ro_holders++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 				     int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	BUG_ON(!lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	switch(level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	case DLM_LOCK_EX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		BUG_ON(!lockres->l_ex_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		lockres->l_ex_holders--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	case DLM_LOCK_PR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		BUG_ON(!lockres->l_ro_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		lockres->l_ro_holders--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) /* WARNING: This function lives in a world where the only three lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876)  * levels are EX, PR, and NL. It *will* have to be adjusted when more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877)  * lock types are added. */
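/* For example: if another node asks for EX, the highest level we may keep
 * is NL; if it asks for PR, we may keep PR; a request at NL leaves us free
 * to hold EX. */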
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) static inline int ocfs2_highest_compat_lock_level(int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	int new_level = DLM_LOCK_EX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (level == DLM_LOCK_EX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		new_level = DLM_LOCK_NL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	else if (level == DLM_LOCK_PR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		new_level = DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	return new_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
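/*
 * All l_flags updates funnel through lockres_set_flags() so that any mask
 * waiters whose condition "(l_flags & mw_mask) == mw_goal" has just become
 * true can be completed on the spot.
 */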
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) static void lockres_set_flags(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			      unsigned long newflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	struct ocfs2_mask_waiter *mw, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	assert_spin_locked(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	lockres->l_flags = newflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		list_del_init(&mw->mw_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		mw->mw_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		complete(&mw->mw_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		ocfs2_track_lock_wait(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	lockres_set_flags(lockres, lockres->l_flags | or);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 				unsigned long clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	lockres_set_flags(lockres, lockres->l_flags & ~clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	lockres->l_level = lockres->l_requested;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (lockres->l_level <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		lockres->l_blocking = DLM_LOCK_NL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	/* A convert from RO to EX doesn't really need anything as our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 * information is already up to date. A convert from NL to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	 * *anything*, however, should mark us as needing an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	 * update. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	if (lockres->l_level == DLM_LOCK_NL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	lockres->l_level = lockres->l_requested;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	 * downconverting the lock before the upconvert has fully completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	 * Do not prevent the dc thread from downconverting if NONBLOCK lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 * had already returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	if (!(lockres->l_flags & OCFS2_LOCK_NONBLOCK_FINISHED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		lockres_clear_flags(lockres, OCFS2_LOCK_NONBLOCK_FINISHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	if (lockres->l_requested > DLM_LOCK_NL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	    !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	lockres->l_level = lockres->l_requested;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 				     int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	int needs_downconvert = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	assert_spin_locked(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	if (level > lockres->l_blocking) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		/* Only schedule a downconvert if we haven't already scheduled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		 * one that goes low enough to satisfy the level we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		 * blocking on.  This also catches the case where we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		 * duplicate BASTs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		if (ocfs2_highest_compat_lock_level(level) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		    ocfs2_highest_compat_lock_level(lockres->l_blocking))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			needs_downconvert = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		lockres->l_blocking = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	     lockres->l_name, level, lockres->l_level, lockres->l_blocking,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	     needs_downconvert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	if (needs_downconvert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	mlog(0, "needs_downconvert = %d\n", needs_downconvert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	return needs_downconvert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  * OCFS2_LOCK_PENDING and l_pending_gen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  * Why does OCFS2_LOCK_PENDING exist?  To close a race between setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock().  See ocfs2_unblock_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  * for more details on the race.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  * OCFS2_LOCK_PENDING closes the race quite nicely.  However, it introduces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  * a race on itself.  In o2dlm, we can get the ast before ocfs2_dlm_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  * returns.  The ast clears OCFS2_LOCK_BUSY, and must therefore clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  * OCFS2_LOCK_PENDING at the same time.  When ocfs2_dlm_lock() returns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  * the caller is going to try to clear PENDING again.  If nothing else is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  * happening, __lockres_clear_pending() sees PENDING is unset and does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  * nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  * But what if another path (e.g. the downconvert thread) has just started a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  * new locking action?  The other path has re-set PENDING.  Our path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  * cannot clear PENDING, because that will re-open the original race
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  * window.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * [Example]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * ocfs2_meta_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  *  ocfs2_cluster_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  *   set BUSY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  *   set PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  *   drop l_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  *   ocfs2_dlm_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  *    ocfs2_locking_ast()		ocfs2_downconvert_thread()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  *     clear PENDING			 ocfs2_unblock_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  *					  take_l_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  *					  !BUSY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  *					  ocfs2_prepare_downconvert()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)  *					   set BUSY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  *					   set PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  *					  drop l_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)  *   take l_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)  *   clear PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)  *   drop l_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)  *			<window>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)  *					  ocfs2_dlm_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)  * So as you can see, we now have a window where l_lock is not held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)  * PENDING is not set, and ocfs2_dlm_lock() has not been called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)  * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)  * set by ocfs2_prepare_downconvert().  That wasn't nice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  * To solve this we introduce l_pending_gen.  lockres_clear_pending() will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)  * only clear PENDING when it is passed a generation number that matches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  * the lockres.  lockres_set_pending() will return the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  * current generation number.  When ocfs2_cluster_lock() goes to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  * PENDING, it passes the generation it got from set_pending().  In our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  * example above, the generation numbers will *not* match.  Thus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  * ocfs2_cluster_lock() will not clear the PENDING set by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)  * ocfs2_prepare_downconvert().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /* Unlocked version for ocfs2_locking_ast() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				    unsigned int generation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 				    struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	assert_spin_locked(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	 * The ast and locking functions can race us here.  The winner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	 * will clear pending, the loser will not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	    (lockres->l_pending_gen != generation))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	lockres->l_pending_gen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	 * The downconvert thread may have skipped us because we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	 * were PENDING.  Wake it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		ocfs2_wake_downconvert_thread(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* Locked version for callers of ocfs2_dlm_lock() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 				  unsigned int generation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 				  struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	__lockres_clear_pending(lockres, generation, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	assert_spin_locked(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	return lockres->l_pending_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
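
/*
 * The pattern used by callers (see ocfs2_lock_create() below for a concrete
 * instance) is roughly:
 *
 *	gen = lockres_set_pending(lockres);	(under l_lock, BUSY already set)
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *	ret = ocfs2_dlm_lock(...);
 *	lockres_clear_pending(lockres, gen, osb);
 *
 * Only the path whose generation still matches actually clears PENDING.
 */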
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
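/*
 * Blocking AST: another node wants this lock at 'level'.  Record the
 * blocking request and, unless the lockres is NOCACHE (it will be dropped
 * shortly anyway), schedule it for the downconvert thread.
 */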
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	int needs_downconvert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	BUG_ON(level <= DLM_LOCK_NL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	     "type %s\n", lockres->l_name, level, lockres->l_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	     ocfs2_lock_type_string(lockres->l_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	 * We can skip the bast for locks which don't enable caching -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	 * they'll be dropped at the earliest possible time anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	if (needs_downconvert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		ocfs2_schedule_blocked_lock(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	wake_up(&lockres->l_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	ocfs2_wake_downconvert_thread(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
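/*
 * Completion AST: the DLM has finished a lock request.  Read the status out
 * of the lksb, apply the state transition recorded in l_action (attach,
 * convert or downconvert), then clear the pending generation and wake
 * anyone waiting on l_event.
 */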
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	status = ocfs2_dlm_lock_status(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	if (status == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		     lockres->l_name, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	     "level %d => %d\n", lockres->l_name, lockres->l_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	     lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	switch(lockres->l_action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	case OCFS2_AST_ATTACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		ocfs2_generic_handle_attach_action(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	case OCFS2_AST_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		ocfs2_generic_handle_convert_action(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	case OCFS2_AST_DOWNCONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		ocfs2_generic_handle_downconvert_action(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		     "flags 0x%lx, unlock: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		     lockres->l_name, lockres->l_action, lockres->l_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		     lockres->l_unlock_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	/* Set the action to something invalid so that we can catch it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	 * if we get called again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	lockres->l_action = OCFS2_AST_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	/* Did we try to cancel this lock?  Clear that state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	 * We may have beaten the locking functions here.  We certainly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	 * know that dlm_lock() has been called :-)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	 * Because we can't have two lock calls in flight at once, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	 * can use lockres->l_pending_gen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	__lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	wake_up(&lockres->l_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	     lockres->l_name, lockres->l_unlock_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		     "unlock_action %d\n", error, lockres->l_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		     lockres->l_unlock_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	switch(lockres->l_unlock_action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	case OCFS2_UNLOCK_CANCEL_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		mlog(0, "Cancel convert success for %s\n", lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		lockres->l_action = OCFS2_AST_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		/* Downconvert thread may have requeued this lock, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		 * need to wake it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 			ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	case OCFS2_UNLOCK_DROP_LOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		lockres->l_level = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	wake_up(&lockres->l_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  * This is the filesystem locking protocol.  It provides the lock handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  * hooks for the underlying DLM.  It has a maximum version number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  * The version number allows interoperability with systems running at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  * the same major number and an equal or smaller minor number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)  * Whenever the filesystem does new things with locks (adds or removes a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)  * lock, orders them differently, does different things underneath a lock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)  * the version must be changed.  The protocol is negotiated when joining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)  * the dlm domain.  A node may join the domain if its major version is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)  * identical to all other nodes and its minor version is greater than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)  * or equal to all other nodes.  When its minor version is greater than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)  * the other nodes, it will run at the minor version specified by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)  * other nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)  * If a locking change is made that will not be compatible with older
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)  * versions, the major number must be increased and the minor version set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)  * to zero.  If a change merely adds a behavior that can be disabled when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)  * speaking to older versions, the minor version must be increased.  If a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)  * change adds a fully backwards compatible change (eg, LVB changes that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)  * are just ignored by older versions), the version does not need to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)  * updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) static struct ocfs2_locking_protocol lproto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	.lp_max_version = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	.lp_lock_ast		= ocfs2_locking_ast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	.lp_blocking_ast	= ocfs2_blocking_ast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	.lp_unlock_ast		= ocfs2_unlock_ast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) void ocfs2_set_locking_protocol(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
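/*
 * Undo the in-flight state set up before a failed DLM call: clear BUSY and
 * UPCONVERT_FINISHING and invalidate either l_action (for a failed
 * lock/convert) or l_unlock_action (for a failed unlock), then wake
 * waiters.
 */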
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 						int convert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (convert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		lockres->l_action = OCFS2_AST_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	wake_up(&lockres->l_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /* Note: If we detect another process working on the lock (i.e.,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)  * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)  * to do the right thing in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static int ocfs2_lock_create(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			     struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 			     int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			     u32 dlm_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	unsigned int gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	     dlm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	    (lockres->l_flags & OCFS2_LOCK_BUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	lockres->l_action = OCFS2_AST_ATTACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	lockres->l_requested = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	gen = lockres_set_pending(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	ret = ocfs2_dlm_lock(osb->cconn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			     level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			     &lockres->l_lksb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			     dlm_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			     lockres->l_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			     OCFS2_LOCK_ID_MAX_LEN - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	lockres_clear_pending(lockres, gen, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		ocfs2_recover_from_dlm_error(lockres, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 					int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	ret = lockres->l_flags & flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	wait_event(lockres->l_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	wait_event(lockres->l_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /* predict what lock level we'll be dropping down to on behalf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  * of another node, and return true if the currently wanted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  * level will be compatible with it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 						     int wanted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	INIT_LIST_HEAD(&mw->mw_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	init_completion(&mw->mw_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	ocfs2_init_start_time(mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	wait_for_completion(&mw->mw_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	/* Re-arm the completion in case we want to wait on it again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	reinit_completion(&mw->mw_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	return mw->mw_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 				    struct ocfs2_mask_waiter *mw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 				    unsigned long mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 				    unsigned long goal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	BUG_ON(!list_empty(&mw->mw_item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	assert_spin_locked(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	mw->mw_mask = mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	mw->mw_goal = goal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	ocfs2_track_lock_wait(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* Returns 0 if the mw that was removed was already satisfied, -EBUSY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)  * if the mask still hasn't reached its goal. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 				      struct ocfs2_mask_waiter *mw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	assert_spin_locked(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	if (!list_empty(&mw->mw_item)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		list_del_init(&mw->mw_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		init_completion(&mw->mw_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		ocfs2_track_lock_wait(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 				      struct ocfs2_mask_waiter *mw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	ret = __lockres_remove_mask_waiter(lockres, mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 					     struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	ret = wait_for_completion_interruptible(&mw->mw_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		lockres_remove_mask_waiter(lockres, mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		ret = mw->mw_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	/* Re-arm the completion in case we want to wait on it again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	reinit_completion(&mw->mw_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
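/*
 * Core cluster-locking slow path (a rough summary; see the body and its
 * callers for the exact semantics of lkm_flags/arg_flags): acquire
 * 'lockres' at 'level', registering a mask waiter and retrying from the
 * 'again:' label whenever the resource is busy or blocked at an
 * incompatible level.
 */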
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 				struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 				int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 				u32 lkm_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 				int arg_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 				int l_subclass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 				unsigned long caller_ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	struct ocfs2_mask_waiter mw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	unsigned int gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	int noqueue_attempted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	int dlm_locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	int kick_dc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		mlog_errno(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	ocfs2_init_mask_waiter(&mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		lkm_flags |= DLM_LKF_VALBLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	if (catch_signals && signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		ret = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 			"Cluster lock called on freeing lockres %s! flags "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			"0x%lx\n", lockres->l_name, lockres->l_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	/* We only compare against the currently granted level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	 * here. If the lock is blocked waiting on a downconvert,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	 * we'll get caught below. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	if (lockres->l_flags & OCFS2_LOCK_BUSY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	    level > lockres->l_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		/* is someone sitting in dlm_lock? If so, wait on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		 * them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		wait = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		 * We've upconverted. If the lock now has a level we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		 * work with, we take it. If, however, the lock is not at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		 * required level, we go thru the full cycle. One way this could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		 * happen is if a process requesting an upconvert to PR is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		 * closely followed by another requesting upconvert to an EX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		 * If the process requesting EX lands here, we want it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		 * continue attempting to upconvert and let the process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		 * requesting PR take the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		 * If multiple processes request upconvert to PR, the first one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		 * here will take the lock. The others will have to go thru the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		 * downconvert request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		if (level <= lockres->l_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			goto update_holders;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		/* the lock is currently blocked on behalf of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		 * another node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		wait = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (level > lockres->l_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		if (noqueue_attempted > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 			ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		if (lkm_flags & DLM_LKF_NOQUEUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			noqueue_attempted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		if (lockres->l_action != OCFS2_AST_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 			mlog(ML_ERROR, "lockres %s has action %u pending\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 			     lockres->l_name, lockres->l_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 			lockres->l_action = OCFS2_AST_ATTACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 			lkm_flags &= ~DLM_LKF_CONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			lockres->l_action = OCFS2_AST_CONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 			lkm_flags |= DLM_LKF_CONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		lockres->l_requested = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		gen = lockres_set_pending(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		BUG_ON(level == DLM_LOCK_IV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		BUG_ON(level == DLM_LOCK_NL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		     lockres->l_name, lockres->l_level, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		/* call dlm_lock to upgrade lock now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		ret = ocfs2_dlm_lock(osb->cconn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 				     level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 				     &lockres->l_lksb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 				     lkm_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 				     lockres->l_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 				     OCFS2_LOCK_ID_MAX_LEN - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		lockres_clear_pending(lockres, gen, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 			    (ret != -EAGAIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 				ocfs2_log_dlm_error("ocfs2_dlm_lock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 						    ret, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 			ocfs2_recover_from_dlm_error(lockres, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		dlm_locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		     lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		/* At this point we've gone inside the dlm and need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		 * complete our work regardless. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		catch_signals = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		/* wait for busy to clear and carry on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) update_holders:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	/* Ok, if we get here then we're good to go. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	ocfs2_inc_holders(lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	/* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	if (kick_dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		ocfs2_wake_downconvert_thread(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	 * This is helping work around a lock inversion between the page lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	 * and dlm locks.  One path holds the page lock while calling aops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	 * which block acquiring dlm locks.  The voting thread holds dlm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	 * locks while acquiring page locks while downconverting data locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	 * This block is helping an aop path notice the inversion and back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	 * off to unlock its page lock before trying the dlm lock again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		if (__lockres_remove_mask_waiter(lockres, &mw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 			if (dlm_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 				lockres_or_flags(lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 					OCFS2_LOCK_NONBLOCK_FINISHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 			ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 			spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 			goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		ret = ocfs2_wait_for_mask(&mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 			goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	ocfs2_update_lock_stats(lockres, level, &mw, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) #ifdef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	if (!ret && lockres->l_lockdep_map.key != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		if (level == DLM_LOCK_PR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 			rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 				!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 				caller_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 			rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 				!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 				caller_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 				     struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 				     int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 				     u32 lkm_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 				     int arg_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 				    0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
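
/*
 * Illustrative sketch of a non-blocking attempt (hypothetical caller,
 * not a call site in this file): passing OCFS2_LOCK_NONBLOCK in
 * arg_flags lets a path that already holds a page lock detect the
 * page-lock/DLM inversion handled in __ocfs2_cluster_lock() and back
 * off instead of waiting.
 *
 *	ret = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_PR, 0,
 *				 OCFS2_LOCK_NONBLOCK);
 *	if (ret == -EAGAIN) {
 *		// Would have had to wait on the DLM: unlock the page,
 *		// then retry, taking the cluster lock first.
 *	}
 */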
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 				   struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 				   int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 				   unsigned long caller_ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	ocfs2_dec_holders(lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	ocfs2_downconvert_on_unlock(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) #ifdef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	if (lockres->l_lockdep_map.key != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		rwsem_release(&lockres->l_lockdep_map, caller_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static int ocfs2_create_new_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 				 struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 				 int ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 				 int local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) /* Grants us an EX lock on the data and metadata resources, skipping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)  * the normal cluster directory lookup. Use this ONLY on newly created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)  * inodes which other nodes can't possibly see, and which haven't been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)  * hashed in the inode hash yet. This can give us a good performance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)  * increase as it'll skip the network broadcast normally associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)  * with creating a new lock resource. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) int ocfs2_create_new_inode_locks(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	BUG_ON(!ocfs2_inode_is_new(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	/* Note that we don't increment any of the holder counts, nor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	 * do we add anything to a journal handle. Since this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	 * supposed to be a new inode which the cluster doesn't know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	 * about yet, there is no need to.  As far as the LVB handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	 * is concerned, this is basically like acquiring an EX lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	 * on a resource which has an invalid one -- we'll set it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	 * valid when we release the EX. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	 * We don't want to use DLM_LKF_LOCAL on metadata locks as they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	 * don't use a generation in their lock names.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) int ocfs2_rw_lock(struct inode *inode, int write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	int status, level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	struct ocfs2_lock_res *lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	mlog(0, "inode %llu take %s RW lock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	     write ? "EXMODE" : "PRMODE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	lockres = &OCFS2_I(inode)->ip_rw_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) int ocfs2_try_rw_lock(struct inode *inode, int write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	int status, level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	struct ocfs2_lock_res *lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	mlog(0, "inode %llu try to take %s RW lock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	     write ? "EXMODE" : "PRMODE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	lockres = &OCFS2_I(inode)->ip_rw_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) void ocfs2_rw_unlock(struct inode *inode, int write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	mlog(0, "inode %llu drop %s RW lock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	     write ? "EXMODE" : "PRMODE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	if (!ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		ocfs2_cluster_unlock(osb, lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
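
/*
 * Illustrative pairing sketch (hypothetical caller; 'write' is an
 * assumed flag): the RW lock is taken at EX for writers and PR for
 * readers, and the unlock must pass the same 'write' value so the
 * matching level is dropped.
 *
 *	status = ocfs2_rw_lock(inode, write);
 *	if (status < 0)
 *		return status;
 *	// ... do the I/O under the cluster RW lock ...
 *	ocfs2_rw_unlock(inode, write);
 */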
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)  * ocfs2_open_lock always gets a PR mode lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) int ocfs2_open_lock(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	struct ocfs2_lock_res *lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	mlog(0, "inode %llu take PRMODE open lock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	     (unsigned long long)OCFS2_I(inode)->ip_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	lockres = &OCFS2_I(inode)->ip_open_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_PR, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) int ocfs2_try_open_lock(struct inode *inode, int write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	int status = 0, level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	struct ocfs2_lock_res *lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	mlog(0, "inode %llu try to take %s open lock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	     write ? "EXMODE" : "PRMODE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	if (ocfs2_is_hard_readonly(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		if (write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 			status = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	lockres = &OCFS2_I(inode)->ip_open_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	 * The file system may already be holding a PRMODE/EXMODE open lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	 * other nodes and the -EAGAIN will indicate to the caller that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	 * this inode is still in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
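
/*
 * Illustrative sketch (hypothetical caller): because the request above
 * uses DLM_LKF_NOQUEUE, -EAGAIN means some other node still holds an
 * open lock, i.e. the inode is still in use somewhere in the cluster.
 *
 *	status = ocfs2_try_open_lock(inode, 1);	// try for EXMODE
 *	if (status == -EAGAIN) {
 *		// another node still has the inode open; keep it around
 *	} else if (status == 0) {
 *		// no other node has the inode open
 *	}
 */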
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)  * ocfs2_open_unlock unlocks PR and EX mode open locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) void ocfs2_open_unlock(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	mlog(0, "inode %llu drop open lock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	     (unsigned long long)OCFS2_I(inode)->ip_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	if (lockres->l_ro_holders)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_PR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	if (lockres->l_ex_holders)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 				     int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	struct ocfs2_mask_waiter mw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	ocfs2_init_mask_waiter(&mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) retry_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		ret = ocfs2_prepare_cancel_convert(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 			ret = ocfs2_cancel_convert(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 				mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			goto retry_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		ocfs2_wait_for_mask(&mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		goto retry_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	ret = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	 * We may still have gotten the lock, in which case there's no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	 * point to restarting the syscall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	if (lockres->l_level == level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	     lockres->l_flags, lockres->l_level, lockres->l_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)  * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)  * flock() calls. The locking approach this requires is sufficiently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)  * different from all other cluster lock types that we implement a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)  * separate path to the "low-level" dlm calls. In particular:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)  * - No optimization of lock levels is done - we take exactly what's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)  *   been requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)  * - No lock caching is employed. We immediately downconvert to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)  *   no-lock at unlock time. This also means flock locks never go on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)  *   the blocking list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)  * - Since userspace can trivially deadlock itself with flock, we make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)  *   sure to allow cancellation of a misbehaving application's flock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)  *   request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)  * - Access to any flock lockres doesn't require concurrency, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)  *   can simplify the code by requiring the caller to guarantee
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)  *   serialization of dlmglue flock calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) int ocfs2_file_lock(struct file *file, int ex, int trylock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	struct ocfs2_file_private *fp = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	struct ocfs2_lock_res *lockres = &fp->fp_flock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	struct ocfs2_mask_waiter mw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	ocfs2_init_mask_waiter(&mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	    (lockres->l_level > DLM_LOCK_NL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		mlog(ML_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		     "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		     "level: %u\n", lockres->l_name, lockres->l_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		     lockres->l_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		 * Get the lock at NLMODE to start - that way we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		 * can cancel the upconvert request if need be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		ret = ocfs2_wait_for_mask(&mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	lockres->l_action = OCFS2_AST_CONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	lkm_flags |= DLM_LKF_CONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	lockres->l_requested = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		if (!trylock || (ret != -EAGAIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 			ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		ocfs2_recover_from_dlm_error(lockres, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		lockres_remove_mask_waiter(lockres, &mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	if (ret == -ERESTARTSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		 * Userspace can deadlock itself with flock(). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		 * current behavior locally is to allow the deadlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		 * but abort the system call if a signal is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		 * received. We follow this example, otherwise a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		 * poorly written program could sit in the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		 * until reboot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		 * Handling this is a bit more complicated for Ocfs2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		 * though. We can't exit this function with an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		 * outstanding lock request, so a cancel convert is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		 * required. We intentionally overwrite 'ret' - if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		 * cancel fails and the lock was granted, it's easier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		 * to just bubble success back up to the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		ret = ocfs2_flock_handle_signal(lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	} else if (!ret && (level > lockres->l_level)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		/* Trylock failed asynchronously */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		BUG_ON(!trylock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	     lockres->l_name, ex, trylock, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) void ocfs2_file_unlock(struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	unsigned int gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	struct ocfs2_file_private *fp = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	struct ocfs2_lock_res *lockres = &fp->fp_flock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	struct ocfs2_mask_waiter mw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	ocfs2_init_mask_waiter(&mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	if (lockres->l_level == DLM_LOCK_NL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	     lockres->l_name, lockres->l_flags, lockres->l_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	     lockres->l_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	 * Fake a blocking ast for the downconvert code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	lockres->l_blocking = DLM_LOCK_EX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	ret = ocfs2_wait_for_mask(&mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
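
/*
 * Illustrative sketch of how a VFS flock() request could map onto the
 * two helpers above (hypothetical handler; 'fl' is an assumed
 * struct file_lock argument):
 *
 *	int ex = (fl->fl_type == F_WRLCK);	   // exclusive vs. shared
 *	int trylock = !(fl->fl_flags & FL_SLEEP);  // LOCK_NB maps to trylock
 *
 *	if (fl->fl_type == F_UNLCK)
 *		ocfs2_file_unlock(file);
 *	else
 *		ret = ocfs2_file_lock(file, ex, trylock);
 */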
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 					struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	int kick = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	/* If we know that another node is waiting on our lock, kick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	 * the downconvert thread pre-emptively when we reach a release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	 * condition. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		switch(lockres->l_blocking) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		case DLM_LOCK_EX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 			if (!lockres->l_ex_holders && !lockres->l_ro_holders)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 				kick = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		case DLM_LOCK_PR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 			if (!lockres->l_ex_holders)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 				kick = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 			BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	if (kick)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		ocfs2_wake_downconvert_thread(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) #define OCFS2_SEC_BITS   34
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) #define OCFS2_SEC_SHIFT  (64 - OCFS2_SEC_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) #define OCFS2_NSEC_MASK  ((1ULL << OCFS2_SEC_SHIFT) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /* LVB only has room for 64 bits of time here so we pack it for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)  * now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) static u64 ocfs2_pack_timespec(struct timespec64 *spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	u64 res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	u64 sec = clamp_t(time64_t, spec->tv_sec, 0, 0x3ffffffffull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	u32 nsec = spec->tv_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
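
/*
 * Worked example of the packing above (values chosen for
 * illustration): OCFS2_SEC_SHIFT = 64 - 34 = 30, so nanoseconds
 * (always < 10^9 < 2^30) fit in the low 30 bits and seconds take the
 * upper 34 bits.  For tv_sec = 5, tv_nsec = 999999999:
 *
 *	packed = (5 << 30) | 999999999
 *	       = 5368709120 + 999999999
 *	       = 6368709119
 *
 * ocfs2_unpack_timespec() below reverses this: packed >> 30 yields 5
 * and packed & OCFS2_NSEC_MASK yields 999999999.
 */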
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) /* Call this with the lockres locked. I am reasonably sure we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)  * need ip_lock in this function as anyone who would be changing those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)  * values is supposed to be blocked in ocfs2_inode_lock right now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) static void __ocfs2_stuff_meta_lvb(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	struct ocfs2_meta_lvb *lvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	 * Invalidate the LVB of a deleted inode - this way other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	 * nodes are forced to go to disk and discover the new inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	 * status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	if (oi->ip_flags & OCFS2_INODE_DELETED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		lvb->lvb_version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	lvb->lvb_version   = OCFS2_LVB_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	lvb->lvb_isize	   = cpu_to_be64(i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	lvb->lvb_iuid      = cpu_to_be32(i_uid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	lvb->lvb_igid      = cpu_to_be32(i_gid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	lvb->lvb_iatime_packed  =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	lvb->lvb_ictime_packed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	lvb->lvb_imtime_packed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	lvb->lvb_iattr    = cpu_to_be32(oi->ip_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	mlog_meta_lvb(0, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) static void ocfs2_unpack_timespec(struct timespec64 *spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 				  u64 packed_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	struct ocfs2_meta_lvb *lvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	mlog_meta_lvb(0, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	/* We're safe here without the lockres lock... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	ocfs2_set_inode_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	/* fast-symlinks are a special case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		inode->i_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		inode->i_blocks = ocfs2_inode_sector_count(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	ocfs2_unpack_timespec(&inode->i_atime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 			      be64_to_cpu(lvb->lvb_iatime_packed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	ocfs2_unpack_timespec(&inode->i_mtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 			      be64_to_cpu(lvb->lvb_imtime_packed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	ocfs2_unpack_timespec(&inode->i_ctime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 			      be64_to_cpu(lvb->lvb_ictime_packed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 					      struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	    && lvb->lvb_version == OCFS2_LVB_VERSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	    && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) /* Determine whether a lock resource needs to be refreshed, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)  * arbitrate who gets to refresh it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)  *   0 means no refresh needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)  *   > 0 means you need to refresh this and you MUST call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)  *   ocfs2_complete_lock_res_refresh afterwards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) refresh_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		ocfs2_wait_on_refreshing_lock(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		goto refresh_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	/* Ok, I'll be the one to refresh this lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	status = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	mlog(0, "status %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) /* If status is non-zero, I'll mark it as not being in refresh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)  * anymore, but I won't clear the needs refresh flag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 						   int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	wake_up(&lockres->l_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) }
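
/*
 * Illustrative sketch of the refresh protocol (hypothetical helper; the
 * real users of this pattern are ocfs2_inode_lock_update() below and
 * ocfs2_super_lock() further down): ask whether a refresh is needed, do it,
 * and always complete, passing any error so OCFS2_LOCK_NEEDS_REFRESH stays
 * set for the next holder.
 */
static int __maybe_unused
ocfs2_refresh_pattern_example(struct ocfs2_lock_res *lockres)
{
	int status = 0;

	if (ocfs2_should_refresh_lock_res(lockres)) {
		/* ... re-read the state protected by this lock here ... */
		ocfs2_complete_lock_res_refresh(lockres, status);
	}
	return status;
}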
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) /* may or may not return a bh if it went to disk. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) static int ocfs2_inode_lock_update(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 				  struct buffer_head **bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	struct ocfs2_dinode *fe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	if (oi->ip_flags & OCFS2_INODE_DELETED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		mlog(0, "Orphaned inode %llu was deleted while we "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		     "were waiting on a lock. ip_flags = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		     (unsigned long long)oi->ip_blkno, oi->ip_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		status = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	if (!ocfs2_should_refresh_lock_res(lockres))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	/* This will discard any caching information we might have had
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	 * for the inode metadata. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	ocfs2_metadata_cache_purge(INODE_CACHE(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	ocfs2_extent_map_trunc(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		mlog(0, "Trusting LVB on inode %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		     (unsigned long long)oi->ip_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		ocfs2_refresh_inode_from_lvb(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		/* Boo, we have to go to disk. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		/* read bh, cast, ocfs2_refresh_inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		status = ocfs2_read_inode_block(inode, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 			mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 			goto bail_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		fe = (struct ocfs2_dinode *) (*bh)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 		/* This is a good chance to make sure we're not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		 * locking an invalid object.  ocfs2_read_inode_block()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		 * already checked that the inode block is sane.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		 * We bug on a stale inode here because we checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		 * above whether it was wiped from disk. The wiping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		 * node provides a guarantee that we receive that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		 * message and can mark the inode before dropping any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		 * locks associated with it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		mlog_bug_on_msg(inode->i_generation !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 				le32_to_cpu(fe->i_generation),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 				"Invalid dinode %llu disk generation: %u "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 				"inode->i_generation: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 				(unsigned long long)oi->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 				le32_to_cpu(fe->i_generation),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 				inode->i_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 				!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 				"Stale dinode %llu dtime: %llu flags: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 				(unsigned long long)oi->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 				(unsigned long long)le64_to_cpu(fe->i_dtime),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 				le32_to_cpu(fe->i_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		ocfs2_refresh_inode(inode, fe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		ocfs2_track_lock_refresh(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) bail_refresh:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	ocfs2_complete_lock_res_refresh(lockres, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) static int ocfs2_assign_bh(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 			   struct buffer_head **ret_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 			   struct buffer_head *passed_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	if (passed_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		/* Ok, the update went to disk for us, use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		 * returned bh. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		*ret_bh = passed_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 		get_bh(*ret_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	status = ocfs2_read_inode_block(inode, ret_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)  * Returns a negative error code if the callback will never be called;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)  * otherwise the result of the lock will be communicated via the callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) int ocfs2_inode_lock_full_nested(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 				 struct buffer_head **ret_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 				 int ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 				 int arg_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 				 int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	int status, level, acquired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	u32 dlm_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	struct ocfs2_lock_res *lockres = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	struct buffer_head *local_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	mlog(0, "inode %llu, take %s META lock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	     ex ? "EXMODE" : "PRMODE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	acquired = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	/* We'll allow faking a readonly metadata lock for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	 * read-only devices. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	if (ocfs2_is_hard_readonly(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		if (ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 			status = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		goto getbh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	    ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		goto update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		ocfs2_wait_for_recovery(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	dlm_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		dlm_flags |= DLM_LKF_NOQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 				      arg_flags, subclass, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 		if (status != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 			mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	/* Notify the error cleanup path to drop the cluster lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	acquired = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	/* We wait twice because a node may have died while we were in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	 * the lower dlm layers. The second time though, we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	 * committed to owning this lock so we don't allow signals to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	 * abort the operation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		ocfs2_wait_for_recovery(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) update:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	 * We only see this flag if we're being called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	 * ocfs2_read_locked_inode(). It means we're locking an inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	 * which hasn't been populated yet, so clear the refresh flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	 * and let the caller handle it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	if (inode->i_state & I_NEW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 		status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 		if (lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 			ocfs2_complete_lock_res_refresh(lockres, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	/* This is fun. The caller may want a bh back, or it may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	 * not. ocfs2_inode_lock_update definitely wants one in, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	 * may or may not read one, depending on what's in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	 * LVB. The result of all of this is that we've *only* gone to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	 * disk if we have to, so the complexity is worthwhile. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	status = ocfs2_inode_lock_update(inode, &local_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		if (status != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 			mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) getbh:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	if (ret_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		status = ocfs2_assign_bh(inode, ret_bh, local_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 		if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 			mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 			goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 		if (ret_bh && (*ret_bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 			brelse(*ret_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 			*ret_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		if (acquired)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 			ocfs2_inode_unlock(inode, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	brelse(local_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
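
/*
 * Illustrative sketch of a typical caller (hypothetical function, using the
 * ocfs2_inode_lock()/ocfs2_inode_unlock() wrappers from dlmglue.h): take the
 * cluster lock, use the validated dinode buffer, then drop the lock and the
 * buffer_head reference.
 */
static int __maybe_unused ocfs2_inode_lock_example(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	int status;

	status = ocfs2_inode_lock(inode, &bh, 1);	/* 1 == EX */
	if (status < 0) {
		mlog_errno(status);
		return status;
	}

	/* ... inspect or modify the inode under the EX cluster lock ... */

	ocfs2_inode_unlock(inode, 1);
	brelse(bh);
	return 0;
}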
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)  * This is working around a lock inversion between tasks acquiring DLM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)  * locks while holding a page lock and the downconvert thread which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)  * blocks dlm lock acquisition while acquiring page locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)  * ** These _with_page variants are only intended to be called from aop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)  * methods that hold page locks and return a very specific *positive* error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)  * code that aop methods pass up to the VFS -- test for errors with != 0. **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)  * The DLM is called such that it returns -EAGAIN if it would have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)  * blocked waiting for the downconvert thread.  In that case we unlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)  * our page so the downconvert thread can make progress.  Once we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)  * done this we have to return AOP_TRUNCATED_PAGE so the aop method
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)  * that called us can bubble that back up into the VFS who will then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)  * immediately retry the aop call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) int ocfs2_inode_lock_with_page(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 			      struct buffer_head **ret_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 			      int ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 			      struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		 * If we can't get the inode lock immediately, we should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		 * return directly here, since that would lead to a softlockup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		 * problem. Instead, we take a blocking lock and immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		 * unlock it before returning; this avoids wasting CPU on lots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 		 * of retries and improves fairness in acquiring the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 			ocfs2_inode_unlock(inode, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		ret = AOP_TRUNCATED_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) }
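
/*
 * Illustrative sketch of a readpage-style caller (hypothetical aop): on
 * AOP_TRUNCATED_PAGE the page was already unlocked above and the VFS will
 * retry the call; on a real error the page is still locked, so the aop has
 * to unlock it itself before failing.
 */
static int __maybe_unused ocfs2_aop_lock_example(struct inode *inode,
						 struct page *page)
{
	int ret;

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret == AOP_TRUNCATED_PAGE)
		return ret;
	if (ret < 0) {
		mlog_errno(ret);
		unlock_page(page);
		return ret;
	}

	/* ... page is still locked; do the aop work under the PR lock ... */

	ocfs2_inode_unlock(inode, 0);
	unlock_page(page);
	return 0;
}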
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) int ocfs2_inode_lock_atime(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 			  struct vfsmount *vfsmnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 			  int *level, int wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	if (wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		ret = ocfs2_inode_lock(inode, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 		ret = ocfs2_try_inode_lock(inode, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 		if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 			mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	 * If we should update atime, we take an EX lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	 * otherwise we just take a PR lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	if (ocfs2_should_update_atime(inode, vfsmnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		ocfs2_inode_unlock(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		if (wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 			ret = ocfs2_inode_lock(inode, &bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 			ret = ocfs2_try_inode_lock(inode, &bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 			if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 				mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		*level = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		if (ocfs2_should_update_atime(inode, vfsmnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 			ocfs2_update_inode_atime(inode, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 		*level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) }
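
/*
 * Illustrative sketch of a read-path caller (hypothetical function): the
 * helper above decides whether atime needs updating and therefore whether
 * an EX or a PR lock was taken; *level tells the caller which level to
 * drop afterwards.
 */
static int __maybe_unused ocfs2_read_path_atime_example(struct inode *inode,
							struct vfsmount *mnt)
{
	int ret, level = 0;

	ret = ocfs2_inode_lock_atime(inode, mnt, &level, 1);
	if (ret < 0)
		return ret;

	/* ... perform the read while holding the inode lock ... */

	ocfs2_inode_unlock(inode, level);
	return 0;
}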
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) void ocfs2_inode_unlock(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		       int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	mlog(0, "inode %llu drop %s META lock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	     ex ? "EXMODE" : "PRMODE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	if (!ocfs2_is_hard_readonly(osb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	    !ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		ocfs2_cluster_unlock(osb, lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)  * These _tracker variants are introduced to deal with the recursive cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)  * locking issue. The idea is to keep track of a lock holder on the stack of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)  * the current process. If there's a lock holder on the stack, we know the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)  * task context is already protected by cluster locking. Currently, they're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)  * used in some VFS entry routines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)  * Return < 0 on error, 0 if there was no lock holder on the stack before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)  * this call, and 1 if this call would be recursive locking. Return -EINVAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)  * if this lock attempt would cause an upgrade, which is forbidden.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)  * When taking lock levels into account, we face several different situations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)  * 1. no lock is held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)  *    In this case, just lock the inode as requested and return 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)  * 2. We are holding a lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640)  *    For this situation, things diverge into several cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)  *    wanted     holding	     what to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)  *    ex		ex	    see 2.1 below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)  *    ex		pr	    see 2.2 below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)  *    pr		ex	    see 2.1 below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)  *    pr		pr	    see 2.1 below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)  *    2.1 The lock level being held is compatible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)  *    with the wanted level, so no lock action will be taken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)  *    2.2 Otherwise, an upgrade is needed, but it is forbidden.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)  * The reason an upgrade within a process is forbidden is that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)  * a lock upgrade may cause deadlock. The following illustrates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)  * how it happens:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)  *         thread on node1                             thread on node2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)  * ocfs2_inode_lock_tracker(ex=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)  *                                <======   ocfs2_inode_lock_tracker(ex=1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)  * ocfs2_inode_lock_tracker(ex=1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) int ocfs2_inode_lock_tracker(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 			     struct buffer_head **ret_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 			     int ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 			     struct ocfs2_lock_holder *oh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	struct ocfs2_lock_res *lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	struct ocfs2_lock_holder *tmp_oh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	struct pid *pid = task_pid(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	tmp_oh = ocfs2_pid_holder(lockres, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	if (!tmp_oh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		 * This corresponds to case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		 * we don't hold any lock yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 		status = ocfs2_inode_lock_full(inode, ret_bh, ex, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 			if (status != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 				mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 			return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		oh->oh_ex = ex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		ocfs2_add_holder(lockres, oh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	if (unlikely(ex && !tmp_oh->oh_ex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		 * Case 2.2: an upgrade may cause deadlock, so forbid it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		mlog(ML_ERROR, "Recursive locking is not permitted to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		     "upgrade to EX level from PR level.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 	 *  Case 2.1: the OCFS2_META_LOCK_GETBH flag makes ocfs2_inode_lock_full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	 *  ignore the lock level and just return the up-to-date buffer head.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	if (ret_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 		status = ocfs2_inode_lock_full(inode, ret_bh, ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 					       OCFS2_META_LOCK_GETBH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 			if (status != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 				mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 			return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	return tmp_oh ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) void ocfs2_inode_unlock_tracker(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 				int ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 				struct ocfs2_lock_holder *oh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 				int had_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	struct ocfs2_lock_res *lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	/* had_lock means that the current process already took the cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	 * lock previously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	 * If had_lock is 1, we have nothing to do here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	 * If had_lock is 0, we will release the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	if (!had_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 		ocfs2_inode_unlock(inode, oh->oh_ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		ocfs2_remove_holder(lockres, oh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
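
/*
 * Illustrative sketch of the tracker pair in use (hypothetical VFS-entry
 * style function): had_lock == 1 means an outer frame of this task already
 * holds the cluster lock, and the unlock side then leaves it in place.
 */
static int __maybe_unused ocfs2_tracker_usage_example(struct inode *inode)
{
	struct ocfs2_lock_holder oh;
	int had_lock;

	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
	if (had_lock < 0)
		return had_lock;

	/* ... this task context is now covered by the PR cluster lock ... */

	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
	return 0;
}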
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	struct ocfs2_lock_res *lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	struct ocfs2_orphan_scan_lvb *lvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	if (ocfs2_is_hard_readonly(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	lockres = &osb->osb_orphan_scan.os_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 	    lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		*seqno = be32_to_cpu(lvb->lvb_os_seqno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 		*seqno = osb->osb_orphan_scan.os_seqno + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	struct ocfs2_lock_res *lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	struct ocfs2_orphan_scan_lvb *lvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 		lockres = &osb->osb_orphan_scan.os_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		lvb->lvb_os_seqno = cpu_to_be32(seqno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) }
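
/*
 * Illustrative sketch of how the orphan scan sequence number travels through
 * the LVB (hypothetical worker; the real scanner lives in the journal code):
 * a node that finds the sequence number already bumped skips the scan,
 * otherwise it scans and publishes the incremented value on unlock.
 */
static void __maybe_unused ocfs2_orphan_scan_example(struct ocfs2_super *osb)
{
	u32 seqno = 0;

	if (ocfs2_orphan_scan_lock(osb, &seqno) < 0)
		return;

	if (osb->osb_orphan_scan.os_seqno != seqno) {
		/* another node already scanned for this interval */
		osb->osb_orphan_scan.os_seqno = seqno;
	} else {
		/* ... scan the orphan directories, then bump the seqno ... */
		seqno++;
	}

	ocfs2_orphan_scan_unlock(osb, seqno);
}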
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) int ocfs2_super_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		     int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	if (ocfs2_is_hard_readonly(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	/* The super block lock path is really in the best position to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	 * know when resources covered by the lock need to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	 * refreshed, so we do it here. Of course, making sense of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	 * everything is up to the caller :) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	status = ocfs2_should_refresh_lock_res(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		status = ocfs2_refresh_slot_info(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		ocfs2_complete_lock_res_refresh(lockres, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 		if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 			ocfs2_cluster_unlock(osb, lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 			mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		ocfs2_track_lock_refresh(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) void ocfs2_super_unlock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 			int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	if (!ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 		ocfs2_cluster_unlock(osb, lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) int ocfs2_rename_lock(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	if (ocfs2_is_hard_readonly(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) void ocfs2_rename_unlock(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	if (!ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	if (ocfs2_is_hard_readonly(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	if (ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		down_write(&osb->nfs_sync_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 		down_read(&osb->nfs_sync_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 				    0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 		mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 		if (ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 			up_write(&osb->nfs_sync_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 			up_read(&osb->nfs_sync_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	if (!ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 		ocfs2_cluster_unlock(osb, lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 				     ex ? LKM_EXMODE : LKM_PRMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	if (ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 		up_write(&osb->nfs_sync_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 		up_read(&osb->nfs_sync_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		       struct ocfs2_trim_fs_info *info, int trylock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	struct ocfs2_trim_fs_lvb *lvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	if (info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 		info->tf_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	if (ocfs2_is_hard_readonly(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 				    trylock ? DLM_LKF_NOQUEUE : 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 		if (status != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 			mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	if (info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 		if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		    lvb->lvb_version == OCFS2_TRIMFS_LVB_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 			info->tf_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 			info->tf_success = lvb->lvb_success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 			info->tf_nodenum = be32_to_cpu(lvb->lvb_nodenum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 			info->tf_start = be64_to_cpu(lvb->lvb_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 			info->tf_len = be64_to_cpu(lvb->lvb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 			info->tf_minlen = be64_to_cpu(lvb->lvb_minlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 			info->tf_trimlen = be64_to_cpu(lvb->lvb_trimlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) void ocfs2_trim_fs_unlock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 			  struct ocfs2_trim_fs_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	struct ocfs2_trim_fs_lvb *lvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	if (info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 		lvb->lvb_version = OCFS2_TRIMFS_LVB_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 		lvb->lvb_success = info->tf_success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 		lvb->lvb_nodenum = cpu_to_be32(info->tf_nodenum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 		lvb->lvb_start = cpu_to_be64(info->tf_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 		lvb->lvb_len = cpu_to_be64(info->tf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 		lvb->lvb_minlen = cpu_to_be64(info->tf_minlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 		lvb->lvb_trimlen = cpu_to_be64(info->tf_trimlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) }
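
/*
 * Illustrative sketch (hypothetical caller): a trimming node takes the
 * trim_fs lock, checks whether the LVB already records a successful trim by
 * another node, otherwise performs the discard, fills in the result and
 * drops the lock so the LVB is published for the next node.
 */
static int __maybe_unused ocfs2_trim_fs_example(struct ocfs2_super *osb)
{
	struct ocfs2_trim_fs_info info;
	int ret;

	/* trylock: returns -EAGAIN if another node is trimming right now */
	ret = ocfs2_trim_fs_lock(osb, &info, 1);
	if (ret < 0)
		return ret;

	if (!(info.tf_valid && info.tf_success)) {
		/* ... run the actual discard and fill in the info fields ... */
		info.tf_valid = 1;
		info.tf_success = 1;
	}

	ocfs2_trim_fs_unlock(osb, &info);
	return 0;
}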
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) int ocfs2_dentry_lock(struct dentry *dentry, int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	BUG_ON(!dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 	if (ocfs2_is_hard_readonly(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 		if (ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 			return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) /* Reference counting of the dlm debug structure. We want this because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)  * open references on the debug inodes can live on after a mount, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)  * we can't rely on the ocfs2_super to always exist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) static void ocfs2_dlm_debug_free(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	struct ocfs2_dlm_debug *dlm_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 	dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	kfree(dlm_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	if (dlm_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 		kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	kref_get(&debug->d_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	struct ocfs2_dlm_debug *dlm_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	if (!dlm_debug) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 		mlog_errno(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 	kref_init(&dlm_debug->d_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	dlm_debug->d_filter_secs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	return dlm_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) /* Access to this is arbitrated for us via seq_file->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) struct ocfs2_dlm_seq_priv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	struct ocfs2_dlm_debug *p_dlm_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	struct ocfs2_lock_res p_iter_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	struct ocfs2_lock_res p_tmp_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 						 struct ocfs2_dlm_seq_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	struct ocfs2_lock_res *iter, *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	assert_spin_locked(&ocfs2_dlm_tracking_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 		/* discover the head of the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 			mlog(0, "End of list found, %p\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 		/* We track our "dummy" iteration lockres by its NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 		 * l_ops field. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 		if (iter->l_ops != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 			ret = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	struct ocfs2_dlm_seq_priv *priv = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 	struct ocfs2_lock_res *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	spin_lock(&ocfs2_dlm_tracking_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	if (iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 		/* Since a lockres has the lifetime of its container
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 		 * (which can be an inode, ocfs2_super, etc.) we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 		 * copy it out to a temporary lockres while still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 		 * under the spinlock. Obviously after this we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 		 * trust any pointers in the copy returned, but that's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 		 * ok as the information we want isn't typically held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 		 * in them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 		priv->p_tmp_res = *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 		iter = &priv->p_tmp_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	spin_unlock(&ocfs2_dlm_tracking_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 	return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 
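/*
 * Nothing to tear down here: ->start() and ->next() drop the tracking
 * spinlock before returning and hand back a private copy in
 * priv->p_tmp_res, so there is no state left to release.
 */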
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 	struct ocfs2_dlm_seq_priv *priv = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	struct ocfs2_lock_res *iter = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	struct ocfs2_lock_res *dummy = &priv->p_iter_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	spin_lock(&ocfs2_dlm_tracking_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	iter = ocfs2_dlm_next_res(iter, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 	list_del_init(&dummy->l_debug_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	if (iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 		list_add(&dummy->l_debug_list, &iter->l_debug_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 		priv->p_tmp_res = *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 		iter = &priv->p_tmp_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	spin_unlock(&ocfs2_dlm_tracking_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)  * Version is used by debugfs.ocfs2 to determine the format being used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)  * New in version 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)  *	- Lock stats printed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)  * New in version 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)  *	- Max time in lock stats is in usecs (instead of nsecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)  * New in version 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)  *	- Add last pr/ex unlock times and first lock wait time in usecs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) #define OCFS2_DLM_DEBUG_STR_VERSION 4
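/*
 * One line per tracked lockres is emitted into the "locking_state"
 * debugfs file set up in ocfs2_dlm_init_debug() below.
 */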
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	char *lvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	struct ocfs2_lock_res *lockres = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) #ifdef CONFIG_OCFS2_FS_STATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	u64 now, last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	struct ocfs2_dlm_debug *dlm_debug =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 			((struct ocfs2_dlm_seq_priv *)m->private)->p_dlm_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	if (!lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) #ifdef CONFIG_OCFS2_FS_STATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 	if (!lockres->l_lock_wait && dlm_debug->d_filter_secs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		now = ktime_to_us(ktime_get_real());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		if (lockres->l_lock_prmode.ls_last >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 		    lockres->l_lock_exmode.ls_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 			last = lockres->l_lock_prmode.ls_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 			last = lockres->l_lock_exmode.ls_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 		 * Use the d_filter_secs field to filter the lock resource dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		 * the default value (0) filters nothing. Otherwise, only lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 		 * resources active within the last d_filter_secs seconds are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 		 * dumped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 		if (div_u64(now - last, 1000000) > dlm_debug->d_filter_secs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 			   lockres->l_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 			   (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 		seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	seq_printf(m, "%d\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 		   "0x%lx\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 		   "0x%x\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 		   "0x%x\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 		   "%u\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 		   "%u\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 		   "%d\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		   "%d\t",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 		   lockres->l_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 		   lockres->l_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 		   lockres->l_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		   lockres->l_unlock_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 		   lockres->l_ro_holders,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 		   lockres->l_ex_holders,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 		   lockres->l_requested,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 		   lockres->l_blocking);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 	/* Dump the raw LVB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	for(i = 0; i < DLM_LVB_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 		seq_printf(m, "0x%x\t", lvb[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) #ifdef CONFIG_OCFS2_FS_STATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) # define lock_num_prmode(_l)		((_l)->l_lock_prmode.ls_gets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) # define lock_num_exmode(_l)		((_l)->l_lock_exmode.ls_gets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) # define lock_num_prmode_failed(_l)	((_l)->l_lock_prmode.ls_fail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) # define lock_num_exmode_failed(_l)	((_l)->l_lock_exmode.ls_fail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) # define lock_total_prmode(_l)		((_l)->l_lock_prmode.ls_total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) # define lock_total_exmode(_l)		((_l)->l_lock_exmode.ls_total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) # define lock_max_prmode(_l)		((_l)->l_lock_prmode.ls_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) # define lock_max_exmode(_l)		((_l)->l_lock_exmode.ls_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) # define lock_refresh(_l)		((_l)->l_lock_refresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) # define lock_last_prmode(_l)		((_l)->l_lock_prmode.ls_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) # define lock_last_exmode(_l)		((_l)->l_lock_exmode.ls_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) # define lock_wait(_l)			((_l)->l_lock_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) # define lock_num_prmode(_l)		(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) # define lock_num_exmode(_l)		(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) # define lock_num_prmode_failed(_l)	(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) # define lock_num_exmode_failed(_l)	(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) # define lock_total_prmode(_l)		(0ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) # define lock_total_exmode(_l)		(0ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) # define lock_max_prmode(_l)		(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) # define lock_max_exmode(_l)		(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) # define lock_refresh(_l)		(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) # define lock_last_prmode(_l)		(0ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) # define lock_last_exmode(_l)		(0ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) # define lock_wait(_l)			(0ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	/* The following seq_printf was added in version 2 of this output */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	seq_printf(m, "%u\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 		   "%u\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 		   "%u\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 		   "%u\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		   "%llu\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 		   "%llu\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		   "%u\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 		   "%u\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 		   "%u\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 		   "%llu\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 		   "%llu\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 		   "%llu\t",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 		   lock_num_prmode(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 		   lock_num_exmode(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 		   lock_num_prmode_failed(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 		   lock_num_exmode_failed(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 		   lock_total_prmode(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 		   lock_total_exmode(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 		   lock_max_prmode(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		   lock_max_exmode(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 		   lock_refresh(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 		   lock_last_prmode(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 		   lock_last_exmode(lockres),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		   lock_wait(lockres));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	/* End the line */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	seq_printf(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) static const struct seq_operations ocfs2_dlm_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 	.start =	ocfs2_dlm_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 	.stop =		ocfs2_dlm_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	.next =		ocfs2_dlm_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	.show =		ocfs2_dlm_seq_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	struct seq_file *seq = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 	struct ocfs2_dlm_seq_priv *priv = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 	struct ocfs2_lock_res *res = &priv->p_iter_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	ocfs2_remove_lockres_tracking(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	ocfs2_put_dlm_debug(priv->p_dlm_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 	return seq_release_private(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 	struct ocfs2_dlm_seq_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 	struct ocfs2_super *osb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 	priv = __seq_open_private(file, &ocfs2_dlm_seq_ops, sizeof(*priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	if (!priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 		mlog_errno(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	osb = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	ocfs2_get_dlm_debug(osb->osb_dlm_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	priv->p_dlm_debug = osb->osb_dlm_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 
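	/*
	 * Hook our dummy iteration lockres into the tracking list. It is
	 * recognized by its NULL l_ops (see ocfs2_dlm_next_res()) and only
	 * marks the iterator's position; it is never shown itself.
	 */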
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	ocfs2_add_lockres_tracking(&priv->p_iter_res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 				   priv->p_dlm_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) static const struct file_operations ocfs2_dlm_debug_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	.open =		ocfs2_dlm_debug_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 	.release =	ocfs2_dlm_debug_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	.read =		seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	.llseek =	seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	debugfs_create_file("locking_state", S_IFREG|S_IRUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 			    osb->osb_debug_root, osb, &ocfs2_dlm_debug_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 			   &dlm_debug->d_filter_secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	ocfs2_get_dlm_debug(dlm_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 	if (dlm_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		ocfs2_put_dlm_debug(dlm_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) int ocfs2_dlm_init(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	struct ocfs2_cluster_connection *conn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	if (ocfs2_mount_local(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 		osb->node_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		goto local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 	ocfs2_dlm_init_debug(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	/* launch downconvert thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 	osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc-%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 			osb->uuid_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	if (IS_ERR(osb->dc_task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 		status = PTR_ERR(osb->dc_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 		osb->dc_task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	/* for now, uuid == domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 	status = ocfs2_cluster_connect(osb->osb_cluster_stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 				       osb->osb_cluster_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 				       strlen(osb->osb_cluster_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 				       osb->uuid_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 				       strlen(osb->uuid_str),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 				       &lproto, ocfs2_do_node_down, osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 				       &conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 	if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	status = ocfs2_cluster_this_node(conn, &osb->node_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 		mlog(ML_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 		     "could not find this host's node number\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		ocfs2_cluster_disconnect(conn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) local:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 	ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	ocfs2_nfs_sync_lock_init(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 	ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 	osb->cconn = conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 		ocfs2_dlm_shutdown_debug(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 		if (osb->dc_task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 			kthread_stop(osb->dc_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 			int hangup_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	ocfs2_drop_osb_locks(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	 * Now that we have dropped all locks and ocfs2_dismount_volume()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 	 * has disabled recovery, the DLM won't be talking to us.  It's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	 * safe to tear things down before disconnecting the cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 	if (osb->dc_task) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 		kthread_stop(osb->dc_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 		osb->dc_task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	ocfs2_lock_res_free(&osb->osb_super_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 	ocfs2_lock_res_free(&osb->osb_rename_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 	ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 	ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 	ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	osb->cconn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	ocfs2_dlm_shutdown_debug(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) static int ocfs2_drop_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 			   struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 	u32 lkm_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	/* We didn't get anywhere near actually using this lockres. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 		lkm_flags |= DLM_LKF_VALBLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 			"lockres %s, flags 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 			lockres->l_name, lockres->l_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 	while (lockres->l_flags & OCFS2_LOCK_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 		     "%u, unlock_action = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		     lockres->l_name, lockres->l_flags, lockres->l_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		     lockres->l_unlock_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 		/* XXX: Today we just wait on any busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 		 * locks... Perhaps we need to cancel converts in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 		 * future? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 		ocfs2_wait_on_busy_lock(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 		spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 		    lockres->l_level == DLM_LOCK_EX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 		    !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 			lockres->l_ops->set_lvb(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 	if (lockres->l_flags & OCFS2_LOCK_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 		mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 		     lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 		mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 	lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 	/* make sure we never get here while waiting for an ast to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 	 * fire. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 	/* is this necessary? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 	lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 	mlog(0, "lock %s\n", lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 		mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 		ocfs2_dlm_dump_lksb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 	mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	     lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	ocfs2_wait_on_busy_lock(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 				       struct ocfs2_lock_res *lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) /* Mark the lockres as being dropped. It will no longer be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491)  * queued if blocking, but we still may have to wait on it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)  * being dequeued from the downconvert thread before we can consider
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)  * it safe to drop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495)  * You can *not* attempt to call cluster_lock on this lockres anymore. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 				struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	struct ocfs2_mask_waiter mw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	unsigned long flags, flags2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	ocfs2_init_mask_waiter(&mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	lockres->l_flags |= OCFS2_LOCK_FREEING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 	if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 		 * We know the downconvert is queued but not in progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 		 * because we are the downconvert thread and processing a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 		 * different lock. So we can just remove the lock from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 		 * queue. This is not only an optimization but also a way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 		 * to avoid the following deadlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 		 *   ocfs2_dentry_post_unlock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 		 *     ocfs2_dentry_lock_put()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 		 *       ocfs2_drop_dentry_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 		 *         iput()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 		 *           ocfs2_evict_inode()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 		 *             ocfs2_clear_inode()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 		 *               ocfs2_mark_lockres_freeing()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 		 *                 ... blocks waiting for OCFS2_LOCK_QUEUED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 		 *                 since we are the downconvert thread which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 		 *                 should clear the flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 		spin_lock_irqsave(&osb->dc_task_lock, flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		list_del_init(&lockres->l_blocked_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 		osb->blocked_lock_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 		spin_unlock_irqrestore(&osb->dc_task_lock, flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 		 * Warn if we recurse into another post_unlock call.  Strictly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 		 * speaking it isn't a problem but we need to be careful if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 		 * that happens (stack overflow, deadlocks, ...) so warn if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 		 * ocfs2 grows a path for which this can happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 		WARN_ON_ONCE(lockres->l_ops->post_unlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 		/* Since the lock is freeing we don't do much in the fn below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 		ocfs2_process_blocked_lock(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 		mlog(0, "Waiting on lockres %s\n", lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 		status = ocfs2_wait_for_mask(&mw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 		if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 			mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 		spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 			       struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	ocfs2_mark_lockres_freeing(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	ret = ocfs2_drop_lock(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 		mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) int ocfs2_drop_inode_locks(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	int status, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	/* No need to call ocfs2_mark_lockres_freeing here -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	 * ocfs2_clear_inode has done it for us. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 			      &OCFS2_I(inode)->ip_open_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 		mlog_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	status = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 			      &OCFS2_I(inode)->ip_inode_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 		mlog_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	if (err < 0 && !status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 		status = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 			      &OCFS2_I(inode)->ip_rw_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 		mlog_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	if (err < 0 && !status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 		status = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 					      int new_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 	assert_spin_locked(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	if (lockres->l_level <= new_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 		mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 		     "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 		     "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 		     new_level, list_empty(&lockres->l_blocked_list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 		     list_empty(&lockres->l_mask_waiters), lockres->l_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 		     lockres->l_flags, lockres->l_ro_holders,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 		     lockres->l_ex_holders, lockres->l_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 		     lockres->l_unlock_action, lockres->l_requested,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 		     lockres->l_blocking, lockres->l_pending_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 	mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	     lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	lockres->l_action = OCFS2_AST_DOWNCONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	lockres->l_requested = new_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	return lockres_set_pending(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 				  struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 				  int new_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 				  int lvb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 				  unsigned int generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	u32 dlm_flags = DLM_LKF_CONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 	mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 	     lockres->l_level, new_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	 * On DLM_LKF_VALBLK, fsdlm behaves differently from o2cb. It always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	 * expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	 * we can recover correctly from node failure. Otherwise, we may get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	 * an invalid LVB in the LKB, but without DLM_SBF_VALNOTVALID being set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	if (ocfs2_userspace_stack(osb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 	    lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 		lvb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 	if (lvb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 		dlm_flags |= DLM_LKF_VALBLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 	ret = ocfs2_dlm_lock(osb->cconn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 			     new_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 			     &lockres->l_lksb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 			     dlm_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 			     lockres->l_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 			     OCFS2_LOCK_ID_MAX_LEN - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 	lockres_clear_pending(lockres, generation, osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 		ocfs2_recover_from_dlm_error(lockres, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 				        struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 	assert_spin_locked(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 		/* If we're already trying to cancel a lock conversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 		 * then just drop the spinlock and allow the caller to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 		 * requeue this lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 		mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	/* were we in a convert when the bast fired? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 	BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 	       lockres->l_action != OCFS2_AST_DOWNCONVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 	/* set things up for the unlockast to know to just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 	 * clear out the ast_action and unset busy, etc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 	lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 			"lock %s, invalid flags: 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 			lockres->l_name, lockres->l_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 	mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) static int ocfs2_cancel_convert(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 				struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 			       DLM_LKF_CANCEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 		ocfs2_recover_from_dlm_error(lockres, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 	mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) static int ocfs2_unblock_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 			      struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 			      struct ocfs2_unblock_ctl *ctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 	int blocking;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	int new_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	int level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 	int set_lvb = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 	unsigned int gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) recheck:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 	 * Is it still blocking? If not, we have no more work to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 	if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 		BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 		goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 		/* XXX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 		 * This is a *big* race.  The OCFS2_LOCK_PENDING flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 		 * exists entirely for one reason - another thread has set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 		 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 		 * If we do ocfs2_cancel_convert() before the other thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 		 * calls dlm_lock(), our cancel will do nothing.  We will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 		 * get no ast, and we will have no way of knowing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 		 * cancel failed.  Meanwhile, the other thread will call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 		 * into dlm_lock() and wait...forever.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 		 * Why forever?  Because another node has asked for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 		 * lock first; that's why we're here in unblock_lock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 		 * The solution is OCFS2_LOCK_PENDING.  When PENDING is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 		 * set, we just requeue the unblock.  Only when the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 		 * thread has called dlm_lock() and cleared PENDING will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 		 * we then cancel their request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 		 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 		 * at the same time they set OCFS2_LOCK_BUSY.  They must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 		 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 		if (lockres->l_flags & OCFS2_LOCK_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 			mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 			     lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 			goto leave_requeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 		ctl->requeue = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 		ret = ocfs2_prepare_cancel_convert(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 			ret = ocfs2_cancel_convert(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 				mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 		goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 	 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 	 * set when the ast is received for an upconvert just before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 	 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 	 * on the heels of the ast, we want to delay the downconvert just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 	 * enough to allow the up requestor to do its task. Because this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 	 * lock is in the blocked queue, the lock will be downconverted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 	 * as soon as the requestor is done with the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 		goto leave_requeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	 * How can we block and yet be at NL?  We were trying to upconvert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 	 * from NL and got canceled.  The code comes back here, and now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 	 * we notice and clear BLOCKING.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 	if (lockres->l_level == DLM_LOCK_NL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 		BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 		mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 		lockres->l_blocking = DLM_LOCK_NL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 		spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 		goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 	/* if we're blocking an exclusive and we have *any* holders,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 	 * then requeue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 	if ((lockres->l_blocking == DLM_LOCK_EX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 		mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 		     lockres->l_name, lockres->l_ex_holders,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 		     lockres->l_ro_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 		goto leave_requeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 	/* If it's a PR we're blocking, then only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 	 * requeue if we've got any EX holders */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	if (lockres->l_blocking == DLM_LOCK_PR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 	    lockres->l_ex_holders) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 		mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 		     lockres->l_name, lockres->l_ex_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 		goto leave_requeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 	 * Can we get a lock in this state if the holder counts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 	 * zero? The metadata unblock code used to check this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 	if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 	    && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 		mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 		     lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 		goto leave_requeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 	new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 	if (lockres->l_ops->check_downconvert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 	    && !lockres->l_ops->check_downconvert(lockres, new_level)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 		mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 		     lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 		goto leave_requeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 	/* If we get here, then we know that there are no more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 	 * incompatible holders (and anyone asking for an incompatible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 	 * lock is blocked). We can now downconvert the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 	if (!lockres->l_ops->downconvert_worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 		goto downconvert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 	/* Some lockres types want to do a bit of work before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 	 * downconverting a lock. Allow that here. The worker function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 	 * may sleep, so we save off a copy of what we're blocking as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 	 * it may change while we're not holding the spin lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 	blocking = lockres->l_blocking;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 	level = lockres->l_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 	ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 	if (ctl->unblock_action == UNBLOCK_STOP_POST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 		mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 		     lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 		goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 	if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 		/* If this changed underneath us, then we can't drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 		 * it just yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 		mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 		     "Recheck\n", lockres->l_name, blocking,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 		     lockres->l_blocking, level, lockres->l_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 		goto recheck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) downconvert:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 	ctl->requeue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 		if (lockres->l_level == DLM_LOCK_EX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 			set_lvb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 		 * We only set the lvb if the lock has been fully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 		 * refreshed - otherwise we risk setting stale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 		 * data. If we skip the update, there's no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 		 * clear out the lvb here as its value is still valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 		if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 			lockres->l_ops->set_lvb(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 	gen = ocfs2_prepare_downconvert(lockres, new_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 				     gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) leave:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 		mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) leave_requeue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 	ctl->requeue = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 
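/*
 * Pre-downconvert work for inode data locks.  Directories only bump
 * ip_dir_lock_gen; regular files have their mappings unmapped and
 * written back, and the page cache is truncated when an EX request is
 * being blocked.  In both cases cached ACLs are forgotten.
 */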
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 				     int blocking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 	struct address_space *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 	struct ocfs2_inode_info *oi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 	inode = ocfs2_lock_res_inode(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 	mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 	if (S_ISDIR(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 		oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 		oi->ip_dir_lock_gen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 		mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 		goto out_forget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 	if (!S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 	 * We need this before the filemap_fdatawrite() so that it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	 * transfer the dirty bit from the PTE to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 	 * page. Unfortunately this means that even for EX->PR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 	 * downconverts, we'll lose our mappings and have to build
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 	 * them up again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	unmap_mapping_range(mapping, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 	if (filemap_fdatawrite(mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 		mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 	sync_mapping_buffers(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 	if (blocking == DLM_LOCK_EX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 		truncate_inode_pages(mapping, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 		/* We only need to wait on the I/O if we're not also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 		 * truncating pages because truncate_inode_pages waits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 		 * for us above. We don't truncate pages if we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 		 * blocking anything < EXMODE because we want to keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 		 * them around in that case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 		filemap_fdatawait(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) out_forget:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 	forget_all_cached_acls(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 	return UNBLOCK_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 
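/*
 * Return 1 when the journal has checkpointed all changes covered by
 * this caching info, in which case the downconvert may proceed.
 * Otherwise kick off a checkpoint and return 0 so the caller requeues
 * the downconvert.
 */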
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 				 struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 				 int new_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 	int checkpointed = ocfs2_ci_fully_checkpointed(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 	if (checkpointed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 	ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 
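/*
 * Inode metadata may only be downconverted once the journal has
 * checkpointed the changes made under this lock.
 */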
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 					int new_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 	struct inode *inode = ocfs2_lock_res_inode(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 	return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 
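/* Repopulate the meta LVB from the in-memory inode before the downconvert. */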
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 	struct inode *inode = ocfs2_lock_res_inode(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 	__ocfs2_stuff_meta_lvb(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006)  * Does the final reference drop on our dentry lock. Right now this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007)  * happens in the downconvert thread, but we could choose to simplify the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008)  * dlmglue API and push these off to the ocfs2_wq in the future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 				     struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 	ocfs2_dentry_lock_put(osb, dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018)  * d_delete() matching dentries before the lock downconvert.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020)  * At this point, any process waiting to destroy the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021)  * dentry_lock due to last ref count is stopped by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022)  * OCFS2_LOCK_QUEUED flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024)  * We have two potential problems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026)  * 1) If we do the last reference drop on our dentry_lock (via dput)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027)  *    we'll wind up in ocfs2_release_dentry_lock(), waiting on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028)  *    the downconvert to finish. Instead we take an elevated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029)  *    reference and push the drop until after we've completed our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030)  *    unblock processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032)  * 2) There might be another process with a final reference,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033)  *    waiting on us to finish processing. If this is the case, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034)  *    detect it and exit out - there are no more dentries anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 				       int blocking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 	struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 	struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 	int extra_ref = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 	 * This node is blocking another node from getting a read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 	 * lock. This happens when we've renamed within a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 	 * directory. We've forced the other nodes to d_delete(), but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 	 * we never actually dropped our lock because it's still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 	 * valid. The downconvert code will retain a PR for this node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 	 * so there's no further work to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 	if (blocking == DLM_LOCK_PR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 		return UNBLOCK_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 	 * Mark this inode as potentially orphaned. The code in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	 * ocfs2_delete_inode() will figure out whether it actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 	 * needs to be freed or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 	spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 	oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 	spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 	 * Yuck. We need to make sure however that the check of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 	 * OCFS2_LOCK_FREEING and the extra reference are atomic with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 	 * respect to a reference decrement or the setting of that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 	 * flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 	spin_lock(&dentry_attach_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 	if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 	    && dl->dl_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 		dl->dl_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 		extra_ref = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 	spin_unlock(&dentry_attach_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 	mlog(0, "extra_ref = %d\n", extra_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 	 * We have a process waiting on us in ocfs2_dentry_iput(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 	 * which means we can't have any more outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 	 * aliases. There's no need to do any more work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 	if (!extra_ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 		return UNBLOCK_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 	spin_lock(&dentry_attach_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 		dentry = ocfs2_find_local_alias(dl->dl_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 						dl->dl_parent_blkno, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 		if (!dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 		spin_unlock(&dentry_attach_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 		if (S_ISDIR(dl->dl_inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 			shrink_dcache_parent(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 		mlog(0, "d_delete(%pd);\n", dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 		 * The following dcache calls may do an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 		 * iput(). Normally we don't want that from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 		 * downconverting thread, but in this case it's ok
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 		 * because the requesting node already has an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 		 * exclusive lock on the inode, so it can't be queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 		 * for a downconvert.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 		d_delete(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 		dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 		spin_lock(&dentry_attach_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 	spin_unlock(&dentry_attach_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 	 * If we are the last holder of this dentry lock, there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 	 * reason to downconvert so skip straight to the unlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 	if (dl->dl_count == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 		return UNBLOCK_STOP_POST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 	return UNBLOCK_CONTINUE_POST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 					    int new_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 	struct ocfs2_refcount_tree *tree =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 				ocfs2_lock_res_refcount_tree(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 	return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 
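/*
 * Before a refcount tree lock is downconverted, purge its metadata
 * cache so stale buffers are not reused after another node has
 * modified the tree.
 */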
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 					 int blocking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 	struct ocfs2_refcount_tree *tree =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 				ocfs2_lock_res_refcount_tree(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	ocfs2_metadata_cache_purge(&tree->rf_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	return UNBLOCK_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 
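/*
 * Pack the current in-memory quota info into the LVB so other nodes
 * can refresh it without reading the global quota file.
 */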
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 	struct ocfs2_qinfo_lvb *lvb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 	struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 					    oinfo->dqi_gi.dqi_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 	lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 	lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 	lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 	lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 
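/* Drop a cluster lock obtained with ocfs2_qinfo_lock(). */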
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 		ocfs2_cluster_unlock(osb, lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 
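/*
 * Refresh the in-memory quota info, preferring the values carried in
 * the LVB and falling back to the global quota file on disk when the
 * LVB is invalid or of an unexpected version.
 */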
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 					    oinfo->dqi_gi.dqi_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 	struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 	struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 	struct ocfs2_global_disk_dqinfo *gdinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 	    lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 		info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 		info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 		oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 		oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 		oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 		oinfo->dqi_gi.dqi_free_entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 					be32_to_cpu(lvb->lvb_free_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 		status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 						     oinfo->dqi_giblk, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 		if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 			mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 			goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 		gdinfo = (struct ocfs2_global_disk_dqinfo *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 					(bh->b_data + OCFS2_GLOBAL_INFO_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 		info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 		info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 		oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 		oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 		oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 		oinfo->dqi_gi.dqi_free_entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 					le32_to_cpu(gdinfo->dqi_free_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 		brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 		ocfs2_track_lock_refresh(lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) /* Lock quota info; this function expects at least a shared lock on the quota file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220)  * so that we can safely refresh quota info from disk. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 	/* On RO devices, locking really isn't needed... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 	if (ocfs2_is_hard_readonly(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 		if (ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 			status = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 	if (!ocfs2_should_refresh_lock_res(lockres))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 	/* OK, we have the lock but we need to refresh the quota info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 	status = ocfs2_refresh_qinfo(oinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 		ocfs2_qinfo_unlock(oinfo, ex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 	ocfs2_complete_lock_res_refresh(lockres, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 	struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 	struct ocfs2_super *osb = lockres->l_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 	if (ocfs2_is_hard_readonly(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 	if (ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 	struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 	struct ocfs2_super *osb = lockres->l_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 	if (!ocfs2_mount_local(osb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 		ocfs2_cluster_unlock(osb, lockres, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 
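/*
 * Process one lockres from the blocked list: run the unblock logic,
 * requeue it if more work remains, otherwise clear OCFS2_LOCK_QUEUED,
 * and invoke the lock type's post_unlock callback when the worker
 * asked for it.
 */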
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 				       struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 	struct ocfs2_unblock_ctl ctl = {0, 0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 	/* Our reference to the lockres in this function can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 	 * considered valid until we remove the OCFS2_LOCK_QUEUED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 	 * flag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 	BUG_ON(!lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 	BUG_ON(!lockres->l_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 	mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 	/* Detect whether a lock has been marked as going away while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 	 * the downconvert thread was processing other things. A lock can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 	 * still be marked with OCFS2_LOCK_FREEING after this check,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 	 * but short circuiting here will still save us some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 	 * performance. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 	if (lockres->l_flags & OCFS2_LOCK_FREEING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 		goto unqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 	status = ocfs2_unblock_lock(osb, lockres, &ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 		mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 	spin_lock_irqsave(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) unqueue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 	if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 		lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 		ocfs2_schedule_blocked_lock(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 	     ctl.requeue ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 	spin_unlock_irqrestore(&lockres->l_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 	if (ctl.unblock_action != UNBLOCK_CONTINUE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 	    && lockres->l_ops->post_unlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 		lockres->l_ops->post_unlock(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 
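/*
 * Queue a lockres for the downconvert thread.  Locks marked
 * OCFS2_LOCK_FREEING are skipped; everything else gets
 * OCFS2_LOCK_QUEUED and is added to osb->blocked_lock_list if it is
 * not already there.
 */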
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 					struct ocfs2_lock_res *lockres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 	assert_spin_locked(&lockres->l_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 	if (lockres->l_flags & OCFS2_LOCK_FREEING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 		/* Do not schedule a lock for downconvert when it's on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 		 * the way to destruction - any nodes wanting access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 		 * to the resource will get it soon. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 		mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 		     lockres->l_name, lockres->l_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 	lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 	spin_lock_irqsave(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 	if (list_empty(&lockres->l_blocked_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 		list_add_tail(&lockres->l_blocked_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 			      &osb->blocked_lock_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 		osb->blocked_lock_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 
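/*
 * One pass of the downconvert thread: snapshot the wake sequence, then
 * pull at most the number of locks that were queued when the pass
 * started off the blocked list and process each one.
 */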
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 	unsigned long processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 	struct ocfs2_lock_res *lockres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 	spin_lock_irqsave(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 	/* grab this early so we know to try again if a state change and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 	 * wake happen part-way through our work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 	osb->dc_work_sequence = osb->dc_wake_sequence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 	processed = osb->blocked_lock_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) 	 * blocked lock processing in this loop might call iput which can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) 	 * remove items off osb->blocked_lock_list. Downconvert up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 	 * 'processed' number of locks, but stop short if we had some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 	 * removed in ocfs2_mark_lockres_freeing when downconverting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) 	while (processed && !list_empty(&osb->blocked_lock_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 		lockres = list_entry(osb->blocked_lock_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 				     struct ocfs2_lock_res, l_blocked_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 		list_del_init(&lockres->l_blocked_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) 		osb->blocked_lock_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 		spin_unlock_irqrestore(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) 		BUG_ON(!processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 		processed--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 		ocfs2_process_blocked_lock(osb, lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 		spin_lock_irqsave(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 	int empty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 	spin_lock_irqsave(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 	if (list_empty(&osb->blocked_lock_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 		empty = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 	return empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 	int should_wake = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 	spin_lock_irqsave(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 	if (osb->dc_work_sequence != osb->dc_wake_sequence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 		should_wake = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 	return should_wake;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 
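/*
 * Main loop of the per-mount downconvert thread: sleep until woken,
 * process the blocked lock list, and exit only once kthread_stop() has
 * been requested and no queued work remains.
 */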
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) static int ocfs2_downconvert_thread(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 	struct ocfs2_super *osb = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 	/* only quit once we've been asked to stop and there is no more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 	 * work available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 	while (!(kthread_should_stop() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 		ocfs2_downconvert_thread_lists_empty(osb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 		wait_event_interruptible(osb->dc_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) 					 ocfs2_downconvert_thread_should_wake(osb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 					 kthread_should_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 		mlog(0, "downconvert_thread: awoken\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 		ocfs2_downconvert_thread_do_work(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 	osb->dc_task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 	spin_lock_irqsave(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 	/* make sure the downconvert thread gets a swipe at whatever changes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 	 * the caller may have made to the blocked lock state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 	osb->dc_wake_sequence++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 	wake_up(&osb->dc_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) }