/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
	Opt_jid,
	Opt_id,
	Opt_first,
	Opt_nodir,
	Opt_err,
};
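
/*
 * A lock module is expected to consume these tokens through its lm_tokens
 * match table (see struct lm_lockops below) and parse each hostdata option
 * with match_token() from <linux/parser.h>.  As a hedged sketch of the
 * expected shape only (the token strings and the "option" variable are
 * illustrative; the real table belongs to the lock module, e.g. lock_dlm):
 *
 *	static const match_table_t example_tokens = {
 *		{ Opt_jid,   "jid=%d" },
 *		{ Opt_id,    "id=%d" },
 *		{ Opt_first, "first=%d" },
 *		{ Opt_nodir, "nodir=%d" },
 *		{ Opt_err,   NULL },
 *	};
 *
 *	substring_t args[MAX_OPT_ARGS];
 *	int token = match_token(option, example_tokens, args);
 */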

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3
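
/*
 * The compatibility rules above, restated as code purely for illustration
 * (no such helper exists in the glock code):
 *
 *	static bool lm_states_compatible(unsigned int a, unsigned int b)
 *	{
 *		if (a == LM_ST_UNLOCKED || b == LM_ST_UNLOCKED)
 *			return true;
 *		if (a == LM_ST_EXCLUSIVE || b == LM_ST_EXCLUSIVE)
 *			return false;
 *		return a == b;
 *	}
 *
 * The final comparison covers SHARED with SHARED and DEFERRED with DEFERRED.
 */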

/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS2 sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked by the recovery the way
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED.  The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations.  Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state.  A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock.  A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * requester had acquired and released the lock.
 */

#define LM_FLAG_TRY		0x0001
#define LM_FLAG_TRY_1CB		0x0002
#define LM_FLAG_NOEXP		0x0004
#define LM_FLAG_ANY		0x0008
#define LM_FLAG_PRIORITY	0x0010
#define GL_ASYNC		0x0040
#define GL_EXACT		0x0080
#define GL_SKIP			0x0100
#define GL_NOCACHE		0x0400
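
/*
 * A hedged sketch of how the TRY flags are typically used together with
 * GLR_TRYFAILED (defined below): the request either succeeds immediately or
 * fails without blocking.  This shows the general pattern only, not code
 * lifted from a specific caller (gl and gh are assumed locals):
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED,
 *				   LM_FLAG_TRY_1CB | GL_NOCACHE, &gh);
 *	if (error == GLR_TRYFAILED)
 *		return 0;
 *	if (error)
 *		return error;
 *
 * Here GLR_TRYFAILED means the lock was busy and the caller chose to back
 * off rather than block.
 */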

/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 *
 */

#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ERROR		0x00000004

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13

#define GL_GLOCK_MAX_HOLD	(long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD	(long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD	(long)(10)
#define GL_GLOCK_HOLD_INCR	(long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR	(long)(HZ / 40)
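
/*
 * These constants tune the glock minimum hold time, i.e. how long a glock
 * tends to be retained after its last holder is dropped before a demote is
 * honoured.  A hedged sketch of the intended arithmetic only (the actual
 * adjustment lives in glock.c and may differ in detail; gl_hold_time and
 * the "busy" condition are assumptions here):
 *
 *	long t = gl->gl_hold_time;
 *
 *	t += busy ? GL_GLOCK_HOLD_INCR : -GL_GLOCK_HOLD_DECR;
 *	gl->gl_hold_time = clamp(t, GL_GLOCK_MIN_HOLD, GL_GLOCK_MAX_HOLD);
 */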

struct lm_lockops {
	const char *lm_proto_name;
	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
	void (*lm_first_done) (struct gfs2_sbd *sdp);
	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
				    unsigned int result);
	void (*lm_unmount) (struct gfs2_sbd *sdp);
	void (*lm_withdraw) (struct gfs2_sbd *sdp);
	void (*lm_put_lock) (struct gfs2_glock *gl);
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	void (*lm_cancel) (struct gfs2_glock *gl);
	const match_table_t *lm_tokens;
};
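
/*
 * A lock module implements this interface and GFS2 calls into it for all
 * inter-node locking.  Purely as an illustration of the shape (every name
 * below is hypothetical; the in-tree DLM-backed implementation is
 * gfs2_dlm_ops, declared further down in this header):
 *
 *	static const struct lm_lockops example_ops = {
 *		.lm_proto_name = "lock_example",
 *		.lm_mount      = example_mount,
 *		.lm_unmount    = example_unmount,
 *		.lm_put_lock   = example_put_lock,
 *		.lm_lock       = example_lock,
 *		.lm_cancel     = example_cancel,
 *		.lm_tokens     = &example_tokens,
 *	};
 */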

extern struct workqueue_struct *gfs2_delete_workqueue;

static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	struct pid *pid;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_lockref.lock);
	pid = task_pid(current);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;
		if (gh->gh_owner_pid == pid)
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_lockref.lock);

	return gh;
}
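
/*
 * Illustrative use of gfs2_glock_is_locked_by_me(): skip re-acquiring a
 * glock the current task already holds in the wanted state.  A sketch, not
 * code from an actual caller:
 *
 *	struct gfs2_holder *gh = gfs2_glock_is_locked_by_me(ip->i_gl);
 *
 *	if (gh && gh->gh_state == LM_ST_EXCLUSIVE)
 *		return 0;
 */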

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_SHARED;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		return (struct address_space *)(gl + 1);
	return NULL;
}
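
/*
 * gfs2_glock2aspace() relies on glocks created with GLOF_ASPACE having
 * their address_space allocated immediately after the glock structure
 * itself.  A hedged sketch of that layout (the real allocation in glock.c
 * uses a dedicated slab cache rather than a plain allocation):
 *
 *	gl = kzalloc(sizeof(struct gfs2_glock) +
 *		     sizeof(struct address_space), GFP_NOFS);
 *	mapping = (struct address_space *)(gl + 1);
 */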

extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
			  const struct gfs2_glock_operations *glops,
			  int create, struct gfs2_glock **glp);
extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
			     u16 flags, struct gfs2_holder *gh);
extern void gfs2_holder_reinit(unsigned int state, u16 flags,
			       struct gfs2_holder *gh);
extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
			     const struct gfs2_glock_operations *glops,
			     unsigned int state, u16 flags,
			     struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
			    bool fsid);

#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {		\
			gfs2_dump_glock(NULL, gl, true);	\
			BUG(); } } while(0)
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
	while (0)
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
	while (0)
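
/*
 * Example (illustrative) use of the assertion helpers above; on failure the
 * offending glock is dumped before the BUG/warn/withdraw path runs:
 *
 *	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 *	gfs2_glock_assert_warn(gl, gl->gl_state != LM_ST_UNLOCKED);
 */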

extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, u16 flags,
				     struct gfs2_holder *gh)
{
	int error;

	gfs2_holder_init(gl, state, flags, gh);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}
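
/*
 * The usual pairing for gfs2_glock_nq_init() is gfs2_glock_dq_uninit(),
 * which drops the holder and tears it down again.  A sketch of the common
 * pattern (ip->i_gl stands in for whichever glock is being taken):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... access the object protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 */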

extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);

extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);

extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
	gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
	return gh->gh_gl;
}

static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
{
	return !list_empty(&gh->gh_list);
}
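
/*
 * These helpers let a holder live on the stack across paths that may or may
 * not actually take the glock.  A hedged sketch of the common idiom
 * ("need_lock" is just a stand-in condition):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_mark_uninitialized(&gh);
 *	if (need_lock) {
 *		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *		if (error)
 *			return error;
 *	}
 *	...
 *	if (gfs2_holder_initialized(&gh))
 *		gfs2_glock_dq_uninit(&gh);
 */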

/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
static inline void glock_set_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
		gfs2_dump_glock(NULL, gl, true);
	gl->gl_object = object;
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 *
 * I'd love to similarly add this:
 *	else if (gfs2_assert_warn(gl->gl_sbd, gl->gl_object == object))
 *		gfs2_dump_glock(NULL, gl, true);
 * Unfortunately, that's not possible because as soon as gfs2_delete_inode
 * frees the block in the rgrp, another process can reassign it for an I_NEW
 * inode in gfs2_create_inode because that calls new_inode, not gfs2_iget.
 * That means gfs2_delete_inode may subsequently try to call this function
 * for a glock that's already pointing to a brand new inode.  If we clear the
 * new inode's gl_object, we'll introduce metadata corruption.  Function
 * gfs2_delete_inode calls clear_inode which calls gfs2_clear_inode which also
 * tries to clear gl_object, so it's more than just gfs2_delete_inode.
 *
 */
static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_object == object)
		gl->gl_object = NULL;
	spin_unlock(&gl->gl_lockref.lock);
}
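
/*
 * glock_set_object() and glock_clear_object() are meant to be used as a
 * pair around the lifetime of the object that backs the glock.  A hedged
 * sketch for an inode glock (not lifted from inode.c):
 *
 *	glock_set_object(ip->i_gl, ip);
 *	... the inode is now reachable through gl->gl_object ...
 *	glock_clear_object(ip->i_gl, ip);
 */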

extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);

#endif /* __GLOCK_DOT_H__ */