Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards. The listing below is the beginning of fs/gfs2/glock.c, the GFS2 glock (cluster lock) state machine; all lines trace to the tree's initial commit 8f3ce5b39 (kx, 2023-10-28).

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct rhashtable_iter hti;	/* rhashtable iterator         */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};
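
/*
 * Illustrative sketch (not part of the original source): key_len is
 * offsetofend(struct lm_lockname, ln_type), so the hash key covers ln_sbd,
 * ln_number and ln_type together.  A lookup would therefore build a full
 * lm_lockname and query the table under RCU, along the lines of:
 *
 *	struct lm_lockname name = {
 *		.ln_number = number,
 *		.ln_type = glops->go_type,
 *		.ln_sbd = sdp,
 *	};
 *	struct gfs2_glock *gl;
 *
 *	rcu_read_lock();
 *	gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
 *	rcu_read_unlock();
 */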

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}
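
/*
 * Note (added for clarity): rather than one wait queue per glock, waiters
 * are spread across GLOCK_WAIT_TABLE_SIZE shared wait queues, indexed by
 * hashing the same key bytes that ht_parms describes (key_len / 4 u32
 * words).  Because several lock names can collide on one queue,
 * glock_wake_function() re-checks the full (ln_sbd, ln_number, ln_type)
 * triple and ignores wakeups meant for a different glock.
 */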

/**
 * wake_up_glock  -  Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	kfree(gl->gl_lksb.sb_lvbptr);
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * glock_blocked_by_withdraw - determine if we can still use a glock
 * @gl: the glock
 *
 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
 * when we're withdrawn. For example, to maintain metadata integrity, we should
 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
 * iopen or the transaction glocks may be safely used because none of their
 * metadata goes through the journal. So in general, we should disallow all
 * glocks that are journaled, and allow all the others. One exception is:
 * we need to allow our active journal to be promoted and demoted so others
 * may recover it and we can reacquire it when they're done.
 */
static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (likely(!gfs2_withdrawn(sdp)))
		return false;
	if (gl->gl_ops->go_flags & GLOF_NONDISK)
		return false;
	if (!sdp->sd_jdesc ||
	    gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
		return false;
	return true;
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}
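
/*
 * Note (added for clarity): the smp_mb() above pairs with the lockless
 * waitqueue_active() test in wake_up_glock().  It orders the hash-table
 * removal against the wait-queue check, so a waiter that still sees the
 * dying glock in the table and is about to sleep cannot miss the wakeup.
 * The actual freeing is deferred to gfs2_glock_dealloc() via call_rcu(),
 * keeping concurrent RCU readers of the hash table safe.
 */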

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);

	list_del(&gl->gl_lru);
	list_add_tail(&gl->gl_lru, &lru_list);

	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
		set_bit(GLF_LRU, &gl->gl_flags);
		atomic_inc(&lru_count);
	}

	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);
	if (test_bit(GLF_LRU, &gl->gl_flags)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
{
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above.  The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
{
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}
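
/*
 * Typical caller pattern (illustrative): since a reference is transferred
 * to the work queue, a caller that already holds gl_lockref.lock takes an
 * extra reference before queuing, e.g.
 *
 *	gl->gl_lockref.count++;
 *	__gfs2_glock_queue_work(gl, 0);
 *
 * as run_queue() does on its out_sched path below.
 */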

static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	if (mapping) {
		truncate_inode_pages_final(mapping);
		if (!gfs2_withdrawn(sdp))
			GLOCK_BUG_ON(gl, mapping->nrpages ||
				     mapping->nrexceptional);
	}
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}
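
/*
 * Note (added for clarity): lockref_put_or_lock() drops the reference on
 * a lockless fast path while the count stays above one; only the final
 * put falls through with gl_lockref.lock held, and __gfs2_glock_put()
 * then releases that lock itself as part of tearing the glock down.
 */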

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
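
/*
 * Summary of the rules above (added for clarity, derived from the code):
 *
 *  - An EX request, or any request while an EX holder heads the queue,
 *    is only granted to the head of the queue.
 *  - A request for the state the glock already holds is compatible.
 *  - GL_EXACT requires an exact state match and forbids the fallbacks
 *    below.
 *  - While the glock is held EX locally, SH requests may join an SH head
 *    and DF requests a DF head.
 *  - LM_FLAG_ANY accepts any granted state except unlocked.
 */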

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
	if (gh->gh_flags & GL_ASYNC) {
		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;

		wake_up(&sdp->sd_async_glock_wait);
	}
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM, or 0 to fail queued "try" locks
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}
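
/*
 * Note (added for clarity): the count++/count-- above makes the glock
 * itself hold one reference for as long as it is in any locked state, so
 * a granted glock cannot be freed until it has been demoted back to
 * LM_ST_UNLOCKED.
 */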

static void gfs2_set_demote(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	set_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb();
	wake_up(&sdp->sd_async_glock_wait);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
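
/*
 * Note (added for clarity): LM_ST_EXCLUSIVE serves as the "no demote
 * pending" sentinel for gl_demote_state, since a demote request is never
 * a demote *to* the exclusive state (see the GLOCK_BUG_ON on
 * gl_demote_state in run_queue() below).
 */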

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch (state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
			       gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}

static bool is_system_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

	if (gl == m_ip->i_gl)
		return true;
	return false;
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
	    gh && !(gh->gh_flags & LM_FLAG_NOEXP))
		return;
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		/*
		 * If another process is already doing the invalidate, let that
		 * finish first.  The glock state machine will get back to this
		 * holder again later.
		 */
		if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
				     &gl->gl_flags))
			return;
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync) {
		ret = glops->go_sync(gl);
		/* If we had a problem syncing (due to io errors or whatever),
		 * we should not invalidate the metadata or tell dlm to
		 * release the glock to other nodes.
		 */
		if (ret) {
			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
				fs_err(sdp, "Error %d syncing glock\n", ret);
				gfs2_dump_glock(NULL, gl, true);
			}
			goto skip_inval;
		}
	}
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
		/*
		 * The call to go_sync should have cleared out the ail list.
		 * If there are still items, we have a problem. We ought to
		 * withdraw, but we can't because the withdraw code also uses
		 * glocks. Warn about the error, dump the glock, then fall
		 * through and wait for logd to do the withdraw for us.
		 */
		if ((atomic_read(&gl->gl_ail_count) != 0) &&
		    (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
			gfs2_glock_assert_warn(gl,
					       !atomic_read(&gl->gl_ail_count));
			gfs2_dump_glock(NULL, gl, true);
		}
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
	}

skip_inval:
	gfs2_glock_hold(gl);
	/*
	 * Check for an error encountered since we called go_sync and go_inval.
	 * If so, we can't withdraw from the glock code because the withdraw
	 * code itself uses glocks (see function signal_our_withdraw) to
	 * change the mount to read-only. Most importantly, we must not call
	 * dlm to unlock the glock until the journal is in a known good state
	 * (after journal replay) otherwise other nodes may use the object
	 * (rgrp or dinode) and then later, journal replay will corrupt the
	 * file system. The best we can do here is wait for the logd daemon
	 * to see sd_log_error and withdraw, and in the meantime, requeue the
	 * work for later.
	 *
	 * We make a special exception for some system glocks, such as the
	 * system statfs inode glock, which needs to be granted before the
	 * gfs2_quotad daemon can exit, and that exit needs to finish before
	 * we can unmount the withdrawn file system.
	 *
	 * However, if we're just unlocking the lock (say, for unmount, when
	 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
	 * then it's okay to tell dlm to unlock it.
	 */
	if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
		gfs2_withdraw_delayed(sdp);
	if (glock_blocked_by_withdraw(gl) &&
	    (target != LM_ST_UNLOCKED ||
	     test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
		if (!is_system_glock(gl)) {
			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
			goto out;
		} else {
			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		}
	}

	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			gfs2_glock_queue_work(gl, 0);
		} else if (ret) {
			fs_err(sdp, "lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		gfs2_glock_queue_work(gl, 0);
	}
out:
	spin_lock(&gl->gl_lockref.lock);
}
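
/*
 * Order of operations in do_xmote() (summary added for clarity): first
 * sync out dirty data and metadata via go_sync, then invalidate local
 * caches via go_inval when the lock is being released or demoted to
 * deferred, and only then ask the lock module to change state - lm_lock
 * for lock_dlm, or an immediate finish_xmote() for lock_nolock, where no
 * other node can hold the lock.
 */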
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714)  * find_first_holder - find the first "holder" gh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715)  * @gl: the glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	struct gfs2_holder *gh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	if (!list_empty(&gl->gl_holders)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			return gh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731)  * run_queue - do all outstanding tasks related to a glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732)  * @gl: The glock in question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733)  * @nonblock: True if we must not block in run_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) static void run_queue(struct gfs2_glock *gl, const int nonblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) __releases(&gl->gl_lockref.lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) __acquires(&gl->gl_lockref.lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	struct gfs2_holder *gh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	    gl->gl_demote_state != gl->gl_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		if (find_first_holder(gl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		if (nonblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 			goto out_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		gl->gl_target = gl->gl_demote_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			gfs2_demote_wake(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		ret = do_promote(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		if (ret == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		gh = find_first_waiter(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		gl->gl_target = gh->gh_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			do_error(gl, 0); /* Fail queued try locks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	do_xmote(gl, gh, gl->gl_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) out_sched:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	clear_bit(GLF_LOCK, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	gl->gl_lockref.count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	__gfs2_glock_queue_work(gl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	clear_bit(GLF_LOCK, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) }
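
/*
 * Illustrative sketch (hypothetical helper, not used elsewhere in this
 * file): run_queue() must be entered with gl->gl_lockref.lock held and may
 * drop and retake it, as the __releases/__acquires annotations above say.
 * This mirrors the calling pattern gfs2_glock_nq() and glock_work_func()
 * use further down.
 */
static void example_kick_state_machine(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_lockref.lock);
	run_queue(gl, 1); /* nonblock: punt blocking demotes to the workqueue */
	spin_unlock(&gl->gl_lockref.lock);
}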
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	if (ri->ri_magic == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		ri->ri_generation_deleted = cpu_to_be64(generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	return generation <= be64_to_cpu(ri->ri_generation_deleted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) }
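
/*
 * Illustrative sketch (hypothetical helper): how the two LVB helpers above
 * are meant to pair up across the cluster. The deleting node publishes the
 * inode generation in the iopen glock's lock value block, so other nodes
 * can avoid re-deleting a generation that is already gone.
 */
static void example_delete_once(struct gfs2_glock *io_gl, u64 generation)
{
	if (gfs2_inode_already_deleted(io_gl, generation))
		return;		/* another node beat us to it */
	/* ... delete the inode here, then publish the fact: */
	gfs2_inode_remember_delete(io_gl, generation);
}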
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) static void gfs2_glock_poke(struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	struct gfs2_holder gh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	error = gfs2_glock_nq(&gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		gfs2_glock_dq(&gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	gfs2_holder_uninit(&gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) static bool gfs2_try_evict(struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	struct gfs2_inode *ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	bool evicted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	 * If there is contention on the iopen glock and we have an inode, try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	 * to grab and release the inode so that it can be evicted.  This will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	 * allow the remote node to go ahead and delete the inode without us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	 * having to do it, which will avoid rgrp glock thrashing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	 * The remote node is likely still holding the corresponding inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	 * glock, so it will run before we get to verify that the delete has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	 * happened below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	ip = gl->gl_object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	if (ip && !igrab(&ip->i_inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		ip = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		struct gfs2_glock *inode_gl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		gl->gl_no_formal_ino = ip->i_no_formal_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		d_prune_aliases(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		iput(&ip->i_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		/* If the inode was evicted, gl->gl_object will now be NULL. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		ip = gl->gl_object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		if (ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			inode_gl = ip->i_gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			lockref_get(&inode_gl->gl_lockref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		if (inode_gl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			gfs2_glock_poke(inode_gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			gfs2_glock_put(inode_gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		evicted = !ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	return evicted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) static void delete_work_func(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	struct delayed_work *dwork = to_delayed_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	u64 no_addr = gl->gl_name.ln_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	/* If someone's using this glock to create a new dinode, the block must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	   have been freed by another node and then re-used, in which case our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	   iopen callback arrives too late after the fact. Ignore it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		 * If we can evict the inode, give the remote node trying to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		 * delete the inode some time before verifying that the delete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		 * has happened.  Otherwise, if we cause contention on the inode glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		 * immediately, the remote node will think that we still have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		 * the inode in use, and so it will give up waiting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		 * If we can't evict the inode, signal to the remote node that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		 * the inode is still in use.  We'll later try to delete the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		 * inode locally in gfs2_evict_inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		 * FIXME: We only need to verify that the remote node has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		 * deleted the inode because nodes before this remote delete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		 * rework won't cooperate.  At a later time, when we no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		 * care about compatibility with such nodes, we can skip this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		 * step entirely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		if (gfs2_try_evict(gl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			if (gfs2_queue_delete_work(gl, 5 * HZ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 				    GFS2_BLKST_UNLINKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	if (!IS_ERR_OR_NULL(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		d_prune_aliases(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	gfs2_glock_put(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) static void glock_work_func(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	unsigned long delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	unsigned int drop_refs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		finish_xmote(gl, gl->gl_reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		drop_refs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	    gl->gl_state != LM_ST_UNLOCKED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		unsigned long holdtime, now = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		holdtime = gl->gl_tchange + gl->gl_hold_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		if (time_before(now, holdtime))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			delay = holdtime - now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		if (!delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			gfs2_set_demote(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	run_queue(gl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if (delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		/* Keep one glock reference for the work we requeue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		drop_refs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		if (gl->gl_name.ln_type != LM_TYPE_INODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 			delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		__gfs2_glock_queue_work(gl, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 * Drop the remaining glock references manually here. (Mind that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	 * here as well.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	gl->gl_lockref.count -= drop_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (!gl->gl_lockref.count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		__gfs2_glock_put(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 					    struct gfs2_glock *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	struct wait_glock_queue wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	wait_queue_head_t *wq = glock_waitqueue(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	struct gfs2_glock *gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	wait.name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	init_wait(&wait.wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	wait.wait.func = glock_wake_function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			&new->gl_node, ht_parms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		if (IS_ERR(gl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		gl = rhashtable_lookup_fast(&gl_hash_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			name, ht_parms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	finish_wait(wq, &wait.wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	return gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  * @sdp: The GFS2 superblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)  * @number: the lock number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)  * @glops: The glock_operations to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * @create: If 0, don't create the glock if it doesn't exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  * @glp: the glock is returned here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * This does not lock a glock, just finds/creates structures for one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  * Returns: errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		   const struct gfs2_glock_operations *glops, int create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		   struct gfs2_glock **glp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	struct super_block *s = sdp->sd_vfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	struct lm_lockname name = { .ln_number = number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				    .ln_type = glops->go_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 				    .ln_sbd = sdp };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	struct gfs2_glock *gl, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	struct address_space *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	struct kmem_cache *cachep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	gl = find_insert_glock(&name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if (gl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		*glp = gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	if (!create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (glops->go_flags & GLOF_ASPACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		cachep = gfs2_glock_aspace_cachep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		cachep = gfs2_glock_cachep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	gl = kmem_cache_alloc(cachep, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (!gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	if (glops->go_flags & GLOF_LVB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		if (!gl->gl_lksb.sb_lvbptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			kmem_cache_free(cachep, gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	atomic_inc(&sdp->sd_glock_disposal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	gl->gl_node.next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	gl->gl_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	gl->gl_name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	gl->gl_lockref.count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	gl->gl_state = LM_ST_UNLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	gl->gl_target = LM_ST_UNLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	gl->gl_ops = glops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	gl->gl_dstamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	/* We use the global stats to estimate the initial per-glock stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	gl->gl_tchange = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	gl->gl_object = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	mapping = gfs2_glock2aspace(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	if (mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	mapping->a_ops = &gfs2_meta_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		mapping->host = s->s_bdev->bd_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		mapping->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		mapping_set_gfp_mask(mapping, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		mapping->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		mapping->writeback_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	tmp = find_insert_glock(&name, gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	if (!tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		*glp = gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (IS_ERR(tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		ret = PTR_ERR(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	*glp = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	kfree(gl->gl_lksb.sb_lvbptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	kmem_cache_free(cachep, gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		wake_up(&sdp->sd_glock_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
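
/*
 * Illustrative sketch (hypothetical helper and block number): looking up or
 * creating a glock by number and dropping the reference afterwards.
 * gfs2_glock_nq_num() below is the real convenience wrapper around this
 * pattern.
 */
static int example_get_put(struct gfs2_sbd *sdp, u64 blkno)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, blkno, &gfs2_inode_glops, CREATE, &gl);
	if (error)
		return error;	/* -ENOENT without CREATE, -ENOMEM, ... */
	/* ... initialize holders against gl here ... */
	gfs2_glock_put(gl);	/* drop the reference gfs2_glock_get took */
	return 0;
}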
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  * @gl: the glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  * @state: the state we're requesting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  * @flags: the modifier flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  * @gh: the holder structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		      struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	INIT_LIST_HEAD(&gh->gh_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	gh->gh_gl = gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	gh->gh_ip = _RET_IP_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	gh->gh_owner_pid = get_pid(task_pid(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	gh->gh_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	gh->gh_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	gh->gh_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	gh->gh_iflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	gfs2_glock_hold(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
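
/*
 * Illustrative sketch (hypothetical helper): the usual holder lifecycle,
 * matching what gfs2_glock_poke() above does with its try-lock flags.
 */
static int example_holder_lifecycle(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);	/* takes a glock ref */
	error = gfs2_glock_nq(&gh);	/* blocks until granted */
	if (!error)
		gfs2_glock_dq(&gh);	/* release the grant */
	gfs2_holder_uninit(&gh);	/* drops the glock ref */
	return error;
}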
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)  * @state: the state we're requesting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)  * @flags: the modifier flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  * @gh: the holder structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  * Don't mess with the glock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	gh->gh_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	gh->gh_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	gh->gh_iflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	gh->gh_ip = _RET_IP_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	put_pid(gh->gh_owner_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	gh->gh_owner_pid = get_pid(task_pid(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
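
/*
 * Illustrative sketch (hypothetical helper): re-using a holder for a second
 * request in a different state, keeping the glock reference alive between
 * the two requests instead of uninitializing and re-initializing.
 */
static int example_requeue(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);			/* drop the current grant */
	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, gh);
	return gfs2_glock_nq(gh);		/* queue the new request */
}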
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  * @gh: the holder structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) void gfs2_holder_uninit(struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	put_pid(gh->gh_owner_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	gfs2_glock_put(gh->gh_gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	gfs2_holder_mark_uninitialized(gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	gh->gh_ip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 					unsigned long start_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	/* Have we waited longer than a second? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	if (time_after(jiffies, start_time + HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		/* Lengthen the minimum hold time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 				       GL_GLOCK_MAX_HOLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)  * gfs2_glock_wait - wait on a glock acquisition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)  * @gh: the glock holder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)  * Returns: 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) int gfs2_glock_wait(struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	unsigned long start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	return gh->gh_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	for (i = 0; i < num_gh; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)  * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  * @num_gh: the number of holders in the array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  * @ghs: the glock holder array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)  * Returns: 0 on success, meaning all glocks have been granted and are held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)  *          -ESTALE if the request timed out, meaning all glocks were released,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)  *          and the caller should retry the operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	int i, ret = 0, timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	unsigned long start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	bool keep_waiting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	 * Total up the (minimum hold time * 2) of all glocks and use that to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	 * determine the max amount of time we should wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	for (i = 0; i < num_gh; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		timeout += ghs[i].gh_gl->gl_hold_time << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) wait_for_dlm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	if (!wait_event_timeout(sdp->sd_async_glock_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 				!glocks_pending(num_gh, ghs), timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		ret = -ESTALE; /* request timed out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	 * If dlm granted all our requests, we need to adjust the glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	 * minimum hold time values according to how long we waited.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	 * If our request timed out, we need to repeatedly release any held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	 * glocks we acquired thus far to allow dlm to acquire the remaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	 * glocks without deadlocking.  We cannot currently cancel outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 * glock acquisitions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	 * The HIF_WAIT bit tells us which requests still need a response from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	 * dlm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	 * If dlm sent us any errors, we return the first error we find.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	keep_waiting = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	for (i = 0; i < num_gh; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		/* Skip holders we have already dequeued below. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		if (!gfs2_holder_queued(&ghs[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		/* Skip holders with a pending DLM response. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			keep_waiting = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			if (ret == -ESTALE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 				gfs2_glock_dq(&ghs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 				gfs2_glock_update_hold_time(ghs[i].gh_gl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 							    start_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			ret = ghs[i].gh_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (keep_waiting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		goto wait_for_dlm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 * At this point, we've either acquired all locks or released them all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
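
/*
 * Illustrative sketch (hypothetical helper): the retry loop the -ESTALE
 * return value above is designed for. Each holder is assumed to have been
 * initialized with GL_ASYNC, for which gfs2_glock_nq() is documented below
 * as not returning an error.
 */
static int example_async_acquire(struct gfs2_holder *ghs, unsigned int num_gh)
{
	unsigned int i;
	int error;

	do {
		for (i = 0; i < num_gh; i++)
			if (!gfs2_holder_queued(&ghs[i]))
				gfs2_glock_nq(&ghs[i]);
		error = gfs2_glock_async_wait(num_gh, ghs);
	} while (error == -ESTALE);	/* timed out; all were released */
	return error;
}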
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)  * handle_callback - process a demote request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)  * @gl: the glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)  * @state: the state the caller wants us to change to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)  * There are only two requests that we are going to see in actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)  * practice: LM_ST_SHARED and LM_ST_UNLOCKED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) static void handle_callback(struct gfs2_glock *gl, unsigned int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 			    unsigned long delay, bool remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	if (delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		gfs2_set_demote(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		gl->gl_demote_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		gl->gl_demote_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 			gl->gl_demote_state != state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		gl->gl_demote_state = LM_ST_UNLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (gl->gl_ops->go_callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		gl->gl_ops->go_callback(gl, remote);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	trace_gfs2_demote_rq(gl, remote);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	struct va_format vaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	va_start(args, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	if (seq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		seq_vprintf(seq, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		vaf.fmt = fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		vaf.va = &args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		pr_err("%pV", &vaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  * add_to_queue - Add a holder to the wait queue (but look for recursion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  * @gh: the holder structure to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  * Eventually we should move the recursive locking trap to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  * debugging option or something like that. This is the fast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)  * path and needs to have the minimum number of distractions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) static inline void add_to_queue(struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) __releases(&gl->gl_lockref.lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) __acquires(&gl->gl_lockref.lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	struct gfs2_glock *gl = gh->gh_gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	struct list_head *insert_pt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	struct gfs2_holder *gh2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	int try_futile = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		GLOCK_BUG_ON(gl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		if (test_bit(GLF_LOCK, &gl->gl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			try_futile = !may_grant(gl, gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			goto trap_recursive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		if (try_futile &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			gh->gh_error = GLR_TRYFAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			gfs2_holder_wake(gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			insert_pt = &gh2->gh_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	trace_gfs2_glock_queue(gh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	if (likely(insert_pt == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		list_add_tail(&gh->gh_list, &gl->gl_holders);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			goto do_cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	list_add_tail(&gh->gh_list, insert_pt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) do_cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) trap_recursive:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	fs_err(sdp, "lock type: %d req lock state: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	fs_err(sdp, "lock type: %d req lock state: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	gfs2_dump_glock(NULL, gl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)  * @gh: the holder structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)  * if (gh->gh_flags & GL_ASYNC), this never returns an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)  * Returns: 0, GLR_TRYFAILED, or errno on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) int gfs2_glock_nq(struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	struct gfs2_glock *gl = gh->gh_gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	if (test_bit(GLF_LRU, &gl->gl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		gfs2_glock_remove_from_lru(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	add_to_queue(gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		gl->gl_lockref.count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		__gfs2_glock_queue_work(gl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	run_queue(gl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	if (!(gh->gh_flags & GL_ASYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		error = gfs2_glock_wait(gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)  * gfs2_glock_poll - poll to see if an async request has been completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)  * @gh: the holder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) int gfs2_glock_poll(struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
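
/*
 * Illustrative sketch (hypothetical helper): pairing GL_ASYNC with
 * gfs2_glock_poll() and gfs2_glock_wait(). The msleep() stands in for
 * useful work the caller would do while the DLM request is in flight.
 */
static int example_async_single(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
	gfs2_glock_nq(&gh);
	while (!gfs2_glock_poll(&gh))
		msleep(10);		/* do other work here instead */
	error = gfs2_glock_wait(&gh);	/* collects gh_error */
	if (!error)
		gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}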
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)  * @gh: the glock holder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) void gfs2_glock_dq(struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	struct gfs2_glock *gl = gh->gh_gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	unsigned delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	int fast_path = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	 * If we're in the process of file system withdraw, we cannot just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	 * dequeue any glocks until our journal is recovered, lest we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	 * introduce file system corruption. We need two exceptions to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	 * rule: We need to allow unlocking of nondisk glocks and the glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	 * for our own journal that needs recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	    glock_blocked_by_withdraw(gl) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	    gh->gh_gl != sdp->sd_jinode_gl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		sdp->sd_glock_dqs_held++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			    TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	if (gh->gh_flags & GL_NOCACHE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	list_del_init(&gh->gh_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	clear_bit(HIF_HOLDER, &gh->gh_iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	if (find_first_holder(gl) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		if (list_empty(&gl->gl_holders) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			fast_path = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		gfs2_glock_add_to_lru(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	trace_gfs2_glock_queue(gh, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (unlikely(!fast_path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		gl->gl_lockref.count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		    gl->gl_name.ln_type == LM_TYPE_INODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			delay = gl->gl_hold_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		__gfs2_glock_queue_work(gl, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) void gfs2_glock_dq_wait(struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	struct gfs2_glock *gl = gh->gh_gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	gfs2_glock_dq(gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)  * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)  * @gh: the holder structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	gfs2_glock_dq(gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	gfs2_holder_uninit(gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)  * gfs2_glock_nq_num - acquire a glock based on lock number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)  * @sdp: the filesystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)  * @number: the lock number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)  * @glops: the glock operations for the type of glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)  * @state: the state to acquire the glock in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)  * @flags: modifier flags for the acquisition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)  * @gh: the struct gfs2_holder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)  * Returns: errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		      const struct gfs2_glock_operations *glops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		      unsigned int state, u16 flags, struct gfs2_holder *gh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	struct gfs2_glock *gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		error = gfs2_glock_nq_init(gl, state, flags, gh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		gfs2_glock_put(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
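/*
 * Illustrative usage sketch (not from the original source; the inode
 * pointer "ip" and the choice of inode glops are assumptions for the
 * example).  The holder is initialized inside gfs2_glock_nq_num() via
 * gfs2_glock_nq_init(), so the matching release is
 * gfs2_glock_dq_uninit():
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_num(sdp, ip->i_no_addr, &gfs2_inode_glops,
 *				  LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... read the object protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 */
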
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  * glock_compare - Compare two struct gfs2_glock structures for sorting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)  * @arg_a: the first structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  * @arg_b: the second structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) static int glock_compare(const void *arg_a, const void *arg_b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	if (a->ln_number > b->ln_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	if (a->ln_number < b->ln_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)  * @num_gh: the number of structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)  * @ghs: an array of struct gfs2_holder structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)  * Returns: 0 on success (all glocks acquired),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)  *          errno on failure (no glocks acquired)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		     struct gfs2_holder **p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	unsigned int x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	for (x = 0; x < num_gh; x++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		p[x] = &ghs[x];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	for (x = 0; x < num_gh; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		error = gfs2_glock_nq(p[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 			while (x--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 				gfs2_glock_dq(p[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)  * gfs2_glock_nq_m - acquire multiple glocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)  * @num_gh: the number of structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)  * @ghs: an array of struct gfs2_holder structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)  * Returns: 0 on success (all glocks acquired),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)  *          errno on failure (no glocks acquired)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	struct gfs2_holder *tmp[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	struct gfs2_holder **pph = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	switch (num_gh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		return gfs2_glock_nq(ghs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		if (num_gh <= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 				    GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		if (!pph)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	error = nq_m_sync(num_gh, ghs, pph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	if (pph != tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		kfree(pph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
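/*
 * Illustrative usage sketch (gl1 and gl2 stand for glock references
 * the caller already holds; they are assumptions for the example).
 * Each holder is initialized first; gfs2_glock_nq_m() then acquires
 * both locks in sorted, deadlock-free order:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		... use the locked objects ...
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */
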
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)  * gfs2_glock_dq_m - release multiple glocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)  * @num_gh: the number of structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)  * @ghs: an array of struct gfs2_holder structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	while (num_gh--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		gfs2_glock_dq(&ghs[num_gh]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	unsigned long delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	unsigned long holdtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	unsigned long now = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	gfs2_glock_hold(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	holdtime = gl->gl_tchange + gl->gl_hold_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (!list_empty(&gl->gl_holders) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	    gl->gl_name.ln_type == LM_TYPE_INODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		if (time_before(now, holdtime))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			delay = holdtime - now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 			delay = gl->gl_hold_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	handle_callback(gl, state, delay, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	__gfs2_glock_queue_work(gl, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)  * gfs2_should_freeze - Figure out if glock should be frozen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)  * @gl: The glock in question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)  * Glocks are not frozen if (a) the result of the dlm operation is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)  * an error, (b) the locking operation was an unlock operation or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)  * (c) there is a "noexp" flagged request anywhere in the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)  * Returns: 1 if freezing should occur, 0 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) static int gfs2_should_freeze(const struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	const struct gfs2_holder *gh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	if (gl->gl_reply & ~LM_OUT_ST_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	if (gl->gl_target == LM_ST_UNLOCKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		if (LM_FLAG_NOEXP & gh->gh_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)  * gfs2_glock_complete - Callback used by locking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)  * @gl: Pointer to the glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)  * @ret: The return value from the dlm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)  * The gl_reply field is protected by gl_lockref.lock, so it is ok
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)  * to use a bitfield shared with other glock state fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	gl->gl_reply = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		if (gfs2_should_freeze(gl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 			set_bit(GLF_FROZEN, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	gl->gl_lockref.count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	__gfs2_glock_queue_work(gl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	struct gfs2_glock *gla, *glb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	gla = list_entry(a, struct gfs2_glock, gl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	glb = list_entry(b, struct gfs2_glock, gl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)  * gfs2_dispose_glock_lru - Demote a list of glocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)  * @list: The list to dispose of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)  * Disposing of glocks may involve disk accesses, so here we sort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)  * the glocks by number (i.e. disk location of the inodes) so that if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)  * there are any such accesses, they'll be sent in order (mostly).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)  * Must be called under the lru_lock, but may drop and retake this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)  * lock. While the lru_lock is dropped, entries may vanish from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)  * list, but no new entries will appear on the list (since it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)  * private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) static void gfs2_dispose_glock_lru(struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) __releases(&lru_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) __acquires(&lru_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	struct gfs2_glock *gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	list_sort(NULL, list, glock_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	while (!list_empty(list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		list_del_init(&gl->gl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		clear_bit(GLF_LRU, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		if (!spin_trylock(&gl->gl_lockref.lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) add_back_to_lru:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 			list_add(&gl->gl_lru, &lru_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 			set_bit(GLF_LRU, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 			atomic_inc(&lru_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 			spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 			goto add_back_to_lru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		gl->gl_lockref.count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		if (demote_ok(gl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		__gfs2_glock_queue_work(gl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		cond_resched_lock(&lru_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)  * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)  * @nr: The number of entries to scan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)  * This function selects the entries on the LRU which are able to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  * be demoted, and then kicks off the process by calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  * gfs2_dispose_glock_lru() above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static long gfs2_scan_glock_lru(int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	struct gfs2_glock *gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	LIST_HEAD(skipped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	LIST_HEAD(dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	long freed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	spin_lock(&lru_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	while ((nr-- >= 0) && !list_empty(&lru_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		/* Test for being demotable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 			list_move(&gl->gl_lru, &dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 			atomic_dec(&lru_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 			freed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		list_move(&gl->gl_lru, &skipped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	list_splice(&skipped, &lru_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	if (!list_empty(&dispose))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		gfs2_dispose_glock_lru(&dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	spin_unlock(&lru_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	return freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 					    struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	if (!(sc->gfp_mask & __GFP_FS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		return SHRINK_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	return gfs2_scan_glock_lru(sc->nr_to_scan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 					     struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	return vfs_pressure_ratio(atomic_read(&lru_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) static struct shrinker glock_shrinker = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	.seeks = DEFAULT_SEEKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	.count_objects = gfs2_glock_shrink_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	.scan_objects = gfs2_glock_shrink_scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
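/*
 * Note: with this shrinker, memory pressure first invokes
 * gfs2_glock_shrink_count() for an estimate of freeable objects (the
 * LRU length scaled by vfs_pressure_ratio()), and then
 * gfs2_glock_shrink_scan() to walk the LRU and demote the requested
 * number of glocks.  Scanning is refused with SHRINK_STOP when the
 * triggering allocation lacks __GFP_FS, since demoting glocks may
 * recurse into filesystem code.
 */
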
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)  * glock_hash_walk - Call a function for each glock in a hash bucket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)  * @examiner: the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)  * @sdp: the filesystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)  * Note that the function can be called multiple times on the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)  * object.  So the user must ensure that the function can cope with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)  * that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	struct gfs2_glock *gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	struct rhashtable_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	rhashtable_walk_enter(&gl_hash_table, &iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		rhashtable_walk_start(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 			if (gl->gl_name.ln_sbd == sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 				examiner(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		rhashtable_walk_stop(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	rhashtable_walk_exit(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
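/*
 * Note: rhashtable_walk_next() may return -EAGAIN and force a rescan,
 * which is why the examiner must tolerate repeat visits (see the
 * comment above).  The examiners used in this file - thaw_glock(),
 * clear_glock(), flush_delete_work() and dump_glock_func() - are
 * written to cope with this; thaw_glock(), for instance, bails out
 * early when test_and_clear_bit(GLF_FROZEN, ...) finds the flag
 * already cleared by an earlier visit.
 */
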
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	bool queued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	queued = queue_delayed_work(gfs2_delete_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 				    &gl->gl_delete, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	if (queued)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	return queued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) void gfs2_cancel_delete_work(struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	if (cancel_delayed_work(&gl->gl_delete)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		gfs2_glock_put(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) static void flush_delete_work(struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		if (cancel_delayed_work(&gl->gl_delete)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			queue_delayed_work(gfs2_delete_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 					   &gl->gl_delete, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	glock_hash_walk(flush_delete_work, sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	flush_workqueue(gfs2_delete_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)  * @gl: The glock to thaw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) static void thaw_glock(struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	if (!lockref_get_not_dead(&gl->gl_lockref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	gfs2_glock_queue_work(gl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)  * clear_glock - look at a glock and see if we can free it from glock cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)  * @gl: the glock to look at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) static void clear_glock(struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	gfs2_glock_remove_from_lru(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	if (!__lockref_is_dead(&gl->gl_lockref)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		gl->gl_lockref.count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		if (gl->gl_state != LM_ST_UNLOCKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		__gfs2_glock_queue_work(gl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)  * gfs2_glock_thaw - Thaw any frozen glocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)  * @sdp: The super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) void gfs2_glock_thaw(struct gfs2_sbd *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	glock_hash_walk(thaw_glock, sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	gfs2_dump_glock(seq, gl, fsid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) static void dump_glock_func(struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	dump_glock(NULL, gl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)  * gfs2_gl_hash_clear - Empty out the glock hash table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)  * @sdp: the filesystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)  * Called when unmounting the filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	flush_workqueue(glock_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	glock_hash_walk(clear_glock, sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	flush_workqueue(glock_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	wait_event_timeout(sdp->sd_glock_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			   atomic_read(&sdp->sd_glock_disposal) == 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 			   HZ * 600);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	glock_hash_walk(dump_glock_func, sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	struct gfs2_glock *gl = ip->i_gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	ret = gfs2_truncatei_resume(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	gfs2_glock_assert_withdraw(gl, ret == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	spin_lock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	clear_bit(GLF_LOCK, &gl->gl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	run_queue(gl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	spin_unlock(&gl->gl_lockref.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) static const char *state2str(unsigned state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	case LM_ST_UNLOCKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		return "UN";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	case LM_ST_SHARED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		return "SH";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	case LM_ST_DEFERRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		return "DF";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	case LM_ST_EXCLUSIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		return "EX";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	return "??";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	char *p = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	if (flags & LM_FLAG_TRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		*p++ = 't';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	if (flags & LM_FLAG_TRY_1CB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		*p++ = 'T';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	if (flags & LM_FLAG_NOEXP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		*p++ = 'e';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	if (flags & LM_FLAG_ANY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		*p++ = 'A';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	if (flags & LM_FLAG_PRIORITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		*p++ = 'p';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	if (flags & GL_ASYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		*p++ = 'a';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	if (flags & GL_EXACT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		*p++ = 'E';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	if (flags & GL_NOCACHE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		*p++ = 'c';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	if (test_bit(HIF_HOLDER, &iflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		*p++ = 'H';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	if (test_bit(HIF_WAIT, &iflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		*p++ = 'W';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	if (test_bit(HIF_FIRST, &iflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		*p++ = 'F';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	*p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
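/*
 * Hypothetical example: a holder line showing f:aW describes a
 * request queued with GL_ASYNC ('a') that is still waiting to be
 * granted (HIF_WAIT, 'W'), while f:H marks a holder that currently
 * owns the lock.
 */
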
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)  * dump_holder - print information about a glock holder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)  * @seq: the seq_file struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)  * @gh: the glock holder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)  * @fs_id_buf: pointer to file system id (if requested)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 			const char *fs_id_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	struct task_struct *gh_owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	char flags_buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	if (gh->gh_owner_pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		       fs_id_buf, state2str(gh->gh_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		       gh->gh_error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		       gh_owner ? gh_owner->comm : "(ended)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		       (void *)gh->gh_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	const unsigned long *gflags = &gl->gl_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	char *p = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	if (test_bit(GLF_LOCK, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		*p++ = 'l';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	if (test_bit(GLF_DEMOTE, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		*p++ = 'D';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	if (test_bit(GLF_PENDING_DEMOTE, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		*p++ = 'd';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		*p++ = 'p';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	if (test_bit(GLF_DIRTY, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		*p++ = 'y';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	if (test_bit(GLF_LFLUSH, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		*p++ = 'f';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		*p++ = 'i';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	if (test_bit(GLF_REPLY_PENDING, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		*p++ = 'r';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	if (test_bit(GLF_INITIAL, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		*p++ = 'I';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	if (test_bit(GLF_FROZEN, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		*p++ = 'F';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	if (!list_empty(&gl->gl_holders))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		*p++ = 'q';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	if (test_bit(GLF_LRU, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		*p++ = 'L';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	if (gl->gl_object)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		*p++ = 'o';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	if (test_bit(GLF_BLOCKING, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		*p++ = 'b';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	if (test_bit(GLF_INODE_CREATING, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		*p++ = 'c';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	if (test_bit(GLF_PENDING_DELETE, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		*p++ = 'P';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	if (test_bit(GLF_FREEING, gflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		*p++ = 'x';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	*p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)  * gfs2_dump_glock - print information about a glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)  * @seq: The seq_file struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)  * @gl: the glock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)  * @fsid: If true, also dump the file system id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)  * The file format is as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)  * One line per object, capital letters are used to indicate objects:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)  * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)  * other objects are indented by a single space and follow the glock to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)  * which they are related. Fields are indicated by lower case letters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)  * followed by a colon and the field value, except for strings which are in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)  * [] so that it's possible to see if they are composed of spaces, for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)  * example. The fields are n = number (id of the object), f = flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)  * t = type, s = state, r = refcount, e = error, p = pid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	const struct gfs2_glock_operations *glops = gl->gl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	unsigned long long dtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	const struct gfs2_holder *gh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	char gflags_buf[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	unsigned long nrpages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		struct address_space *mapping = gfs2_glock2aspace(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		nrpages = mapping->nrpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	memset(fs_id_buf, 0, sizeof(fs_id_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	if (fsid && sdp) /* safety precaution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	dtime = jiffies - gl->gl_demote_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	dtime *= 1000000/HZ; /* demote time in uSec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		dtime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	gfs2_print_dbg(seq, "%sG:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		       "v:%d r:%d m:%ld p:%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		       fs_id_buf, state2str(gl->gl_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		       gl->gl_name.ln_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		       (unsigned long long)gl->gl_name.ln_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		       gflags2str(gflags_buf, gl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		       state2str(gl->gl_target),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		       state2str(gl->gl_demote_state), dtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		       atomic_read(&gl->gl_ail_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		       atomic_read(&gl->gl_revokes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 		       (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	list_for_each_entry(gh, &gl->gl_holders, gh_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		dump_holder(seq, gh, fs_id_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		glops->go_dump(seq, gl, fs_id_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
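/*
 * Hypothetical sample of the format described above (all values
 * invented for illustration): an inode glock (type 2) held shared by
 * one process might appear in debugfs as:
 *
 *	G:  s:SH n:2/808f f:q t:SH d:UN/0 a:0 v:0 r:3 m:200 p:1
 *	 H: s:SH f:H e:0 p:1329 [cat] gfs2_inode_lookup+0x...
 */
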
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	struct gfs2_glock *gl = iter_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		   gl->gl_name.ln_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		   (unsigned long long)gl->gl_name.ln_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) static const char *gfs2_gltype[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	"type",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	"reserved",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	"nondisk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	"inode",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	"rgrp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	"meta",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	"iopen",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	"flock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	"plock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	"quota",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	"journal",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) static const char *gfs2_stype[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	[GFS2_LKS_SRTT]		= "srtt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	[GFS2_LKS_SRTTVAR]	= "srttvar",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	[GFS2_LKS_SRTTB]	= "srttb",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	[GFS2_LKS_SRTTVARB]	= "srttvarb",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	[GFS2_LKS_SIRT]		= "sirt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	[GFS2_LKS_SIRTVAR]	= "sirtvar",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	[GFS2_LKS_DCOUNT]	= "dlm",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	[GFS2_LKS_QCOUNT]	= "queue",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	struct gfs2_sbd *sdp = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	loff_t pos = *(loff_t *)iter_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	unsigned index = pos >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	unsigned subindex = pos & 0x07;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	if (index == 0 && subindex != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		   (index == 0) ? "cpu": gfs2_stype[subindex]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		if (index == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 			seq_printf(seq, " %15u", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 				   lkstats[index - 1].stats[subindex]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	seq_putc(seq, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 
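/*
 * Note: the seq_file position used above is packed as
 * (gltype_index << 3) | stat_subindex, so "index" selects a row of
 * gfs2_gltype[] and "subindex" one of the eight gfs2_stype[]
 * counters; row 0 is the special header line listing the cpu ids.
 */
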
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) int __init gfs2_glock_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	ret = rhashtable_init(&gl_hash_table, &ht_parms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	if (!glock_workqueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		rhashtable_destroy(&gl_hash_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 						WQ_MEM_RECLAIM | WQ_FREEZABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 						0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	if (!gfs2_delete_workqueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		destroy_workqueue(glock_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		rhashtable_destroy(&gl_hash_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	ret = register_shrinker(&glock_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		destroy_workqueue(gfs2_delete_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		destroy_workqueue(glock_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		rhashtable_destroy(&gl_hash_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		init_waitqueue_head(glock_wait_table + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) void gfs2_glock_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	unregister_shrinker(&glock_shrinker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	rhashtable_destroy(&gl_hash_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	destroy_workqueue(glock_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	destroy_workqueue(gfs2_delete_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	struct gfs2_glock *gl = gi->gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	if (gl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		if (n == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		if (!lockref_put_not_zero(&gl->gl_lockref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 			gfs2_glock_queue_put(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		gl = rhashtable_walk_next(&gi->hti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		if (IS_ERR_OR_NULL(gl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 			if (gl == ERR_PTR(-EAGAIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 				n = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 			gl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		if (gl->gl_name.ln_sbd != gi->sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		if (n <= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 			if (!lockref_get_not_dead(&gl->gl_lockref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 			if (__lockref_is_dead(&gl->gl_lockref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 			n--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	gi->gl = gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 
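/*
 * The rhashtable walk can only move forward.  When a read seeks
 * backwards (*pos < gi->last_pos), the walk is therefore re-entered
 * from scratch and gfs2_glock_iter_next() skips ahead to the requested
 * position; otherwise only the difference is skipped.
 */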
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	__acquires(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	struct gfs2_glock_iter *gi = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	loff_t n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	 * We can either stay where we are, skip to the next hash table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	 * entry, or start from the beginning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	if (*pos < gi->last_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		rhashtable_walk_exit(&gi->hti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		n = *pos + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		n = *pos - gi->last_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	rhashtable_walk_start(&gi->hti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	gfs2_glock_iter_next(gi, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	gi->last_pos = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	return gi->gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 				 loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	struct gfs2_glock_iter *gi = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	(*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	gi->last_pos = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	gfs2_glock_iter_next(gi, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	return gi->gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 
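/*
 * Pausing the walk here, between reads, drops the RCU read lock and
 * lets a concurrent hash table resize make progress.
 */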
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	__releases(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	struct gfs2_glock_iter *gi = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	rhashtable_walk_stop(&gi->hti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	dump_glock(seq, iter_ptr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 
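/*
 * The "sbstats" file does not walk the hash table at all: the
 * position simply indexes a fixed number of output rows
 * (GFS2_NR_SBSTATS).  Preemption stays disabled from start to stop
 * while the per-CPU lock statistics are read.
 */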
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	if (*pos >= GFS2_NR_SBSTATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	return pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 				   loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	(*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	if (*pos >= GFS2_NR_SBSTATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	return pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 
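/*
 * "glocks" and "glstats" share the hash table iterator above and
 * differ only in their show callback; "sbstats" uses the trivial
 * index-based iterator.
 */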
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) static const struct seq_operations gfs2_glock_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	.start = gfs2_glock_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	.next  = gfs2_glock_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	.stop  = gfs2_glock_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	.show  = gfs2_glock_seq_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) static const struct seq_operations gfs2_glstats_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	.start = gfs2_glock_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	.next  = gfs2_glock_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	.stop  = gfs2_glock_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	.show  = gfs2_glstats_seq_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) static const struct seq_operations gfs2_sbstats_sops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	.start = gfs2_sbstats_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	.next  = gfs2_sbstats_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	.stop  = gfs2_sbstats_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	.show  = gfs2_sbstats_seq_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 
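/*
 * Preferred seq_file buffer size: one allocation of the largest page
 * order the allocator does not consider "costly", capped at 64KiB.  A
 * large buffer means fewer buffer-overflow restarts in seq_read()
 * when dumping many glocks.
 */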
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 
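/*
 * Common open path for "glocks" and "glstats".  The iterator becomes
 * the seq_file's private data, a GFS2_SEQ_GOODSIZE output buffer is
 * installed up front if memory allows (__GFP_NOWARN keeps a failed
 * attempt quiet; seq_read() falls back to its default buffer), and
 * the hash table walk is entered.
 */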
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) static int __gfs2_glocks_open(struct inode *inode, struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 			      const struct seq_operations *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 		struct seq_file *seq = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		struct gfs2_glock_iter *gi = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		gi->sdp = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		if (seq->buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 			seq->size = GFS2_SEQ_GOODSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		 * Initially, we are "before" the first hash table entry; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 		 * first call to rhashtable_walk_next gets us the first entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		gi->last_pos = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		gi->gl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) static int gfs2_glocks_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
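/*
 * Drop the reference the iterator may still hold on its current
 * glock, then leave the hash table walk before freeing the private
 * data.
 */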
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) static int gfs2_glocks_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	struct seq_file *seq = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	struct gfs2_glock_iter *gi = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	if (gi->gl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		gfs2_glock_put(gi->gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	rhashtable_walk_exit(&gi->hti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	return seq_release_private(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) static int gfs2_glstats_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) static const struct file_operations gfs2_glocks_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	.owner   = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	.open    = gfs2_glocks_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	.read    = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	.llseek  = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	.release = gfs2_glocks_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) static const struct file_operations gfs2_glstats_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	.owner   = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	.open    = gfs2_glstats_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	.read    = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	.llseek  = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	.release = gfs2_glocks_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 
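/*
 * DEFINE_SEQ_ATTRIBUTE() generates gfs2_sbstats_open() and
 * gfs2_sbstats_fops from gfs2_sbstats_sops, passing the inode's
 * i_private (the superblock) through to seq->private.
 */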
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
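/**
 * gfs2_create_debugfs_file - create per-filesystem debugfs files
 * @sdp: the filesystem
 *
 * Creates the directory <debugfs>/gfs2/<table name> with the
 * read-only files "glocks", "glstats" and "sbstats" in it.  Creation
 * errors are ignored; the files are a debugging aid, not a
 * functional requirement.
 *
 * With debugfs mounted in the usual place, a dump can be read with
 * e.g.:
 *
 *	cat /sys/kernel/debug/gfs2/<table name>/glocks
 */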
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 			    &gfs2_glocks_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 			    &gfs2_glstats_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 			    &gfs2_sbstats_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 
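/*
 * Remove a filesystem's debugfs directory and everything in it.
 * debugfs_remove_recursive() accepts a NULL or error pointer, so this
 * is safe even if the directory was never created.
 */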
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	debugfs_remove_recursive(sdp->debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	sdp->debugfs_dir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 
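/*
 * gfs2_root, the "gfs2" directory under the debugfs root, is created
 * once at module load and removed again at unload; each mounted
 * filesystem gets its own subdirectory underneath it.
 */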
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) void gfs2_register_debugfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	gfs2_root = debugfs_create_dir("gfs2", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) void gfs2_unregister_debugfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	debugfs_remove(gfs2_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	gfs2_root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }