// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmlock.c
 *
 * underlying calls for lock creation
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#include "dlmconvert.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"

static struct kmem_cache *dlm_lock_cache;

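/*
 * Node-local cookie allocation state: dlm_next_cookie is a single
 * monotonically increasing counter shared by every lockres on this node,
 * protected by dlm_cookie_lock and combined with the node number in
 * dlm_get_next_cookie() below.
 */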
static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;

static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
                                               struct dlm_lock_resource *res,
                                               struct dlm_lock *lock, int flags);
static void dlm_init_lock(struct dlm_lock *newlock, int type,
                          u8 node, u64 cookie);
static void dlm_lock_release(struct kref *kref);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);

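/* Set up / tear down the slab cache backing all struct dlm_lock
 * allocations on this node. */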
int dlm_init_lock_cache(void)
{
        dlm_lock_cache = kmem_cache_create("o2dlm_lock",
                                           sizeof(struct dlm_lock),
                                           0, SLAB_HWCACHE_ALIGN, NULL);
        if (dlm_lock_cache == NULL)
                return -ENOMEM;
        return 0;
}

void dlm_destroy_lock_cache(void)
{
        kmem_cache_destroy(dlm_lock_cache);
}

/* Tell us whether we can grant a new lock request.
 * locking:
 *   caller needs:  res->spinlock
 *   taken:         none
 *   held on exit:  none
 * returns: 1 if the lock can be granted, 0 otherwise.
 */
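/*
 * Compatibility itself is decided by dlm_lock_compatible() (defined in
 * dlmcommon.h); roughly speaking, NLMODE coexists with anything, PRMODE
 * coexists with PRMODE/NLMODE, and EXMODE only coexists with NLMODE.
 * Note that for locks sitting on the converting queue the new request must
 * be compatible with both the currently granted mode and the requested
 * convert_type.
 */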
static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
                                  struct dlm_lock *lock)
{
        struct dlm_lock *tmplock;

        list_for_each_entry(tmplock, &res->granted, list) {
                if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
                        return 0;
        }

        list_for_each_entry(tmplock, &res->converting, list) {
                if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
                        return 0;
                if (!dlm_lock_compatible(tmplock->ml.convert_type,
                                         lock->ml.type))
                        return 0;
        }

        return 1;
}

/* performs lock creation at the lockres master site
 * locking:
 *   caller needs: none
 *   taken:        takes and drops res->spinlock
 *   held on exit: none
 * returns: DLM_NORMAL, DLM_NOTQUEUED
 */
static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      struct dlm_lock *lock, int flags)
{
        int call_ast = 0, kick_thread = 0;
        enum dlm_status status = DLM_NORMAL;

        mlog(0, "type=%d\n", lock->ml.type);

        spin_lock(&res->spinlock);
        /* if called from dlm_create_lock_handler, need to
         * ensure it will not sleep in dlm_wait_on_lockres */
        status = __dlm_lockres_state_to_status(res);
        if (status != DLM_NORMAL &&
            lock->ml.node != dlm->node_num) {
                /* erf. state changed after lock was dropped. */
                spin_unlock(&res->spinlock);
                dlm_error(status);
                return status;
        }
        __dlm_wait_on_lockres(res);
        __dlm_lockres_reserve_ast(res);

        if (dlm_can_grant_new_lock(res, lock)) {
                mlog(0, "I can grant this lock right away\n");
                /* got it right away */
                lock->lksb->status = DLM_NORMAL;
                status = DLM_NORMAL;
                dlm_lock_get(lock);
                list_add_tail(&lock->list, &res->granted);

                /* for the recovery lock, we can't allow the ast
                 * to be queued since the dlmthread is already
                 * frozen.  but the recovery lock is always locked
                 * with LKM_NOQUEUE so we do not need the ast in
                 * this special case */
                if (!dlm_is_recovery_lock(res->lockname.name,
                                          res->lockname.len)) {
                        kick_thread = 1;
                        call_ast = 1;
                } else {
                        mlog(0, "%s: returning DLM_NORMAL to "
                             "node %u for reco lock\n", dlm->name,
                             lock->ml.node);
                }
        } else {
                /* for NOQUEUE request, unless we get the
                 * lock right away, return DLM_NOTQUEUED */
                if (flags & LKM_NOQUEUE) {
                        status = DLM_NOTQUEUED;
                        if (dlm_is_recovery_lock(res->lockname.name,
                                                 res->lockname.len)) {
                                mlog(0, "%s: returning NOTQUEUED to "
                                     "node %u for reco lock\n", dlm->name,
                                     lock->ml.node);
                        }
                } else {
                        status = DLM_NORMAL;
                        dlm_lock_get(lock);
                        list_add_tail(&lock->list, &res->blocked);
                        kick_thread = 1;
                }
        }

        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

        /* either queue the ast or release it */
        if (call_ast)
                dlm_queue_ast(dlm, lock);
        else
                dlm_lockres_release_ast(dlm, res);

        dlm_lockres_calc_usage(dlm, res);
        if (kick_thread)
                dlm_kick_thread(dlm, res);

        return status;
}

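/* Undo the queueing done for a lock request that failed.  In this file it
 * is called from dlmlock_remote() with res->spinlock held. */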
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
                             struct dlm_lock *lock)
{
        /* remove from local queue if it failed */
        list_del_init(&lock->list);
        lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
}


/*
 * locking:
 *   caller needs: none
 *   taken:        takes and drops res->spinlock
 *   held on exit: none
 * returns: DLM_DENIED, DLM_RECOVERING, or net status
 */
static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      struct dlm_lock *lock, int flags)
{
        enum dlm_status status = DLM_DENIED;
        int lockres_changed = 1;

        mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",
             lock->ml.type, res->lockname.len,
             res->lockname.name, flags);

        /*
         * Wait if resource is getting recovered, remastered, etc.
         * If the resource was remastered and new owner is self, then exit.
         */
        spin_lock(&res->spinlock);
        __dlm_wait_on_lockres(res);
        if (res->owner == dlm->node_num) {
                spin_unlock(&res->spinlock);
                return DLM_RECOVERING;
        }
        res->state |= DLM_LOCK_RES_IN_PROGRESS;

        /* add lock to local (secondary) queue */
        dlm_lock_get(lock);
        list_add_tail(&lock->list, &res->blocked);
        lock->lock_pending = 1;
        spin_unlock(&res->spinlock);

        /* spec seems to say that you will get DLM_NORMAL when the lock
         * has been queued, meaning we need to wait for a reply here. */
        status = dlm_send_remote_lock_request(dlm, res, lock, flags);

        spin_lock(&res->spinlock);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        lock->lock_pending = 0;
        if (status != DLM_NORMAL) {
                if (status == DLM_RECOVERING &&
                    dlm_is_recovery_lock(res->lockname.name,
                                         res->lockname.len)) {
                        /* recovery lock was mastered by dead node.
                         * we need to have calc_usage shoot down this
                         * lockres and completely remaster it. */
                        mlog(0, "%s: recovery lock was owned by "
                             "dead node %u, remaster it now.\n",
                             dlm->name, res->owner);
                } else if (status != DLM_NOTQUEUED) {
                        /*
                         * DO NOT call calc_usage, as this would unhash
                         * the remote lockres before we ever get to use
                         * it.  treat as if we never made any change to
                         * the lockres.
                         */
                        lockres_changed = 0;
                        dlm_error(status);
                }
                dlm_revert_pending_lock(res, lock);
                dlm_lock_put(lock);
        } else if (dlm_is_recovery_lock(res->lockname.name,
                                        res->lockname.len)) {
                /* special case for the $RECOVERY lock.
                 * there will never be an AST delivered to put
                 * this lock on the proper secondary queue
                 * (granted), so do it manually. */
                mlog(0, "%s: $RECOVERY lock for this node (%u) is "
                     "mastered by %u; got lock, manually granting (no ast)\n",
                     dlm->name, dlm->node_num, res->owner);
                list_move_tail(&lock->list, &res->granted);
        }
        spin_unlock(&res->spinlock);

        if (lockres_changed)
                dlm_lockres_calc_usage(dlm, res);

        wake_up(&res->wq);
        return status;
}


/* for remote lock creation.
 * locking:
 *   caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
 *   taken:        none
 *   held on exit: none
 * returns: DLM_NOLOCKMGR, or net status
 */
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
                                                    struct dlm_lock_resource *res,
                                                    struct dlm_lock *lock, int flags)
{
        struct dlm_create_lock create;
        int tmpret, status = 0;
        enum dlm_status ret;

        memset(&create, 0, sizeof(create));
        create.node_idx = dlm->node_num;
        create.requested_type = lock->ml.type;
        create.cookie = lock->ml.cookie;
        create.namelen = res->lockname.len;
        create.flags = cpu_to_be32(flags);
        memcpy(create.name, res->lockname.name, create.namelen);

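        /*
         * tmpret is the o2net transport result; on a successful send the
         * remote handler's dlm_status comes back through &status.
         */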
        tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
                                    sizeof(create), res->owner, &status);
        if (tmpret >= 0) {
                ret = status;
                if (ret == DLM_REJECTED) {
                        mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
                             "owned by node %u. That node is coming back up "
                             "currently.\n", dlm->name, create.namelen,
                             create.name, res->owner);
                        dlm_print_one_lock_resource(res);
                        BUG();
                }
        } else {
                mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
                     "node %u\n", dlm->name, create.namelen, create.name,
                     tmpret, res->owner);
                if (dlm_is_host_down(tmpret))
                        ret = DLM_RECOVERING;
                else
                        ret = dlm_err_to_dlm_status(tmpret);
        }

        return ret;
}

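/* kref wrappers: a dlm_lock lives for as long as someone holds a reference;
 * the final put frees it via dlm_lock_release() below. */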
void dlm_lock_get(struct dlm_lock *lock)
{
        kref_get(&lock->lock_refs);
}

void dlm_lock_put(struct dlm_lock *lock)
{
        kref_put(&lock->lock_refs, dlm_lock_release);
}

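/* kref release callback: the lock must already be off all queues with no
 * ASTs/BASTs pending; drop the lockres ref and free the memory. */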
static void dlm_lock_release(struct kref *kref)
{
        struct dlm_lock *lock;

        lock = container_of(kref, struct dlm_lock, lock_refs);

        BUG_ON(!list_empty(&lock->list));
        BUG_ON(!list_empty(&lock->ast_list));
        BUG_ON(!list_empty(&lock->bast_list));
        BUG_ON(lock->ast_pending);
        BUG_ON(lock->bast_pending);

        dlm_lock_detach_lockres(lock);

        if (lock->lksb_kernel_allocated) {
                mlog(0, "freeing kernel-allocated lksb\n");
                kfree(lock->lksb);
        }
        kmem_cache_free(dlm_lock_cache, lock);
}

/* associate a lock with its lockres, getting a ref on the lockres */
void dlm_lock_attach_lockres(struct dlm_lock *lock,
                             struct dlm_lock_resource *res)
{
        dlm_lockres_get(res);
        lock->lockres = res;
}

/* drop ref on lockres, if there is still one associated with lock */
static void dlm_lock_detach_lockres(struct dlm_lock *lock)
{
        struct dlm_lock_resource *res;

        res = lock->lockres;
        if (res) {
                lock->lockres = NULL;
                mlog(0, "removing lock's lockres reference\n");
                dlm_lockres_put(res);
        }
}

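/* Initialize a freshly allocated dlm_lock.  Note that the cookie is stored
 * in the lock in on-wire (big endian) byte order. */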
static void dlm_init_lock(struct dlm_lock *newlock, int type,
                          u8 node, u64 cookie)
{
        INIT_LIST_HEAD(&newlock->list);
        INIT_LIST_HEAD(&newlock->ast_list);
        INIT_LIST_HEAD(&newlock->bast_list);
        spin_lock_init(&newlock->spinlock);
        newlock->ml.type = type;
        newlock->ml.convert_type = LKM_IVMODE;
        newlock->ml.highest_blocked = LKM_IVMODE;
        newlock->ml.node = node;
        newlock->ml.pad1 = 0;
        newlock->ml.list = 0;
        newlock->ml.flags = 0;
        newlock->ast = NULL;
        newlock->bast = NULL;
        newlock->astdata = NULL;
        newlock->ml.cookie = cpu_to_be64(cookie);
        newlock->ast_pending = 0;
        newlock->bast_pending = 0;
        newlock->convert_pending = 0;
        newlock->lock_pending = 0;
        newlock->unlock_pending = 0;
        newlock->cancel_pending = 0;
        newlock->lksb_kernel_allocated = 0;

        kref_init(&newlock->lock_refs);
}

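/* Allocate a new lock.  If the caller does not supply an lksb, a zeroed one
 * is allocated here and marked kernel-allocated so that dlm_lock_release()
 * will free it along with the lock. */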
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
                               struct dlm_lockstatus *lksb)
{
        struct dlm_lock *lock;
        int kernel_allocated = 0;

        lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
        if (!lock)
                return NULL;

        if (!lksb) {
                /* zero memory only if kernel-allocated */
                lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
                if (!lksb) {
                        kmem_cache_free(dlm_lock_cache, lock);
                        return NULL;
                }
                kernel_allocated = 1;
        }

        dlm_init_lock(lock, type, node, cookie);
        if (kernel_allocated)
                lock->lksb_kernel_allocated = 1;
        lock->lksb = lksb;
        lksb->lockid = lock;
        return lock;
}

/* handler for lock creation net message
 * locking:
 *   caller needs: none
 *   taken:        takes and drops res->spinlock
 *   held on exit: none
 * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
 */
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
                            void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
        struct dlm_lock_resource *res = NULL;
        struct dlm_lock *newlock = NULL;
        struct dlm_lockstatus *lksb = NULL;
        enum dlm_status status = DLM_NORMAL;
        char *name;
        unsigned int namelen;

        BUG_ON(!dlm);

        if (!dlm_grab(dlm))
                return DLM_REJECTED;

        name = create->name;
        namelen = create->namelen;
        status = DLM_REJECTED;
        if (!dlm_domain_fully_joined(dlm)) {
                mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
                     "sending a create_lock message for lock %.*s!\n",
                     dlm->name, create->node_idx, namelen, name);
                dlm_error(status);
                goto leave;
        }

        status = DLM_IVBUFLEN;
        if (namelen > DLM_LOCKID_NAME_MAX) {
                dlm_error(status);
                goto leave;
        }

        status = DLM_SYSERR;
        newlock = dlm_new_lock(create->requested_type,
                               create->node_idx,
                               be64_to_cpu(create->cookie), NULL);
        if (!newlock) {
                dlm_error(status);
                goto leave;
        }

        lksb = newlock->lksb;

        if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
                lksb->flags |= DLM_LKSB_GET_LVB;
                mlog(0, "set DLM_LKSB_GET_LVB flag\n");
        }

        status = DLM_IVLOCKID;
        res = dlm_lookup_lockres(dlm, name, namelen);
        if (!res) {
                dlm_error(status);
                goto leave;
        }

        spin_lock(&res->spinlock);
        status = __dlm_lockres_state_to_status(res);
        spin_unlock(&res->spinlock);

        if (status != DLM_NORMAL) {
                mlog(0, "lockres recovering/migrating/in-progress\n");
                goto leave;
        }

        dlm_lock_attach_lockres(newlock, res);

        status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
leave:
        if (status != DLM_NORMAL)
                if (newlock)
                        dlm_lock_put(newlock);

        if (res)
                dlm_lockres_put(res);

        dlm_put(dlm);

        return status;
}


/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
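/*
 * For example, node 3 with a counter value of 5 yields the cookie
 * 0x0300000000000005ULL: node number in the top byte, per-node counter in
 * the low 56 bits.
 */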
static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
{
        u64 tmpnode = node_num;

        /* shift single byte of node num into top 8 bits */
        tmpnode <<= 56;

        spin_lock(&dlm_cookie_lock);
        *cookie = (dlm_next_cookie | tmpnode);
        if (++dlm_next_cookie & 0xff00000000000000ull) {
                mlog(0, "This node's cookie will now wrap!\n");
                dlm_next_cookie = 1;
        }
        spin_unlock(&dlm_cookie_lock);
}

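/*
 * Main entry point for new lock and convert requests issued from this node.
 *
 * Purely illustrative sketch of a call, not taken from any in-tree caller
 * (my_ast/my_bast/my_data are hypothetical caller-provided callbacks and
 * payload): a user with a joined dlm_ctxt might do roughly
 *
 *	struct dlm_lockstatus lksb;
 *	enum dlm_status st;
 *
 *	memset(&lksb, 0, sizeof(lksb));
 *	st = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_VALBLK,
 *		     name, namelen, my_ast, my_data, my_bast);
 *	if (st != DLM_NORMAL)
 *		... handle DLM_NOTQUEUED, DLM_BADARGS, etc. ...
 *
 * Real callers wrap this in their own locking infrastructure; a conversion
 * passes LKM_CONVERT with the same lksb/ast/bast/astdata as the original
 * request, as checked below.
 */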
enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
                        struct dlm_lockstatus *lksb, int flags,
                        const char *name, int namelen, dlm_astlockfunc_t *ast,
                        void *data, dlm_bastlockfunc_t *bast)
{
        enum dlm_status status;
        struct dlm_lock_resource *res = NULL;
        struct dlm_lock *lock = NULL;
        int convert = 0, recovery = 0;

        /* yes this function is a mess.
         * TODO: clean this up.  lots of common code in the
         * lock and convert paths, especially in the retry blocks */
        if (!lksb) {
                dlm_error(DLM_BADARGS);
                return DLM_BADARGS;
        }

        status = DLM_BADPARAM;
        if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
                dlm_error(status);
                goto error;
        }

        if (flags & ~LKM_VALID_FLAGS) {
                dlm_error(status);
                goto error;
        }

        convert = (flags & LKM_CONVERT);
        recovery = (flags & LKM_RECOVERY);

        if (recovery &&
            (!dlm_is_recovery_lock(name, namelen) || convert) ) {
                dlm_error(status);
                goto error;
        }
        if (convert && (flags & LKM_LOCAL)) {
                mlog(ML_ERROR, "strange LOCAL convert request!\n");
                goto error;
        }

        if (convert) {
                /* CONVERT request */

                /* if converting, must pass in a valid dlm_lock */
                lock = lksb->lockid;
                if (!lock) {
                        mlog(ML_ERROR, "NULL lock pointer in convert "
                             "request\n");
                        goto error;
                }

                res = lock->lockres;
                if (!res) {
                        mlog(ML_ERROR, "NULL lockres pointer in convert "
                             "request\n");
                        goto error;
                }
                dlm_lockres_get(res);

                /* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
                 * static after the original lock call.  convert requests will
                 * ensure that everything is the same, or return DLM_BADARGS.
                 * this means that DLM_DENIED_NOASTS will never be returned.
                 */
                if (lock->lksb != lksb || lock->ast != ast ||
                    lock->bast != bast || lock->astdata != data) {
                        status = DLM_BADARGS;
                        mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, "
                             "astdata=%p\n", lksb, ast, bast, data);
                        mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
                             "astdata=%p\n", lock->lksb, lock->ast,
                             lock->bast, lock->astdata);
                        goto error;
                }
retry_convert:
                dlm_wait_for_recovery(dlm);

                if (res->owner == dlm->node_num)
                        status = dlmconvert_master(dlm, res, lock, flags, mode);
                else
                        status = dlmconvert_remote(dlm, res, lock, flags, mode);
                if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
                    status == DLM_FORWARD) {
                        /* for now, see how this works without sleeping
                         * and just retry right away. I suspect the reco
                         * or migration will complete fast enough that
                         * no waiting will be necessary */
                        mlog(0, "retrying convert with migration/recovery/"
                             "in-progress\n");
                        msleep(100);
                        goto retry_convert;
                }
        } else {
                u64 tmpcookie;

                /* LOCK request */
                status = DLM_BADARGS;
                if (!name) {
                        dlm_error(status);
                        goto error;
                }

                status = DLM_IVBUFLEN;
                if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) {
                        dlm_error(status);
                        goto error;
                }

                dlm_get_next_cookie(dlm->node_num, &tmpcookie);
                lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
                if (!lock) {
                        dlm_error(status);
                        goto error;
                }

                if (!recovery)
                        dlm_wait_for_recovery(dlm);

                /* find or create the lock resource */
                res = dlm_get_lock_resource(dlm, name, namelen, flags);
                if (!res) {
                        status = DLM_IVLOCKID;
                        dlm_error(status);
                        goto error;
                }

                mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
                mlog(0, "creating lock: lock=%p res=%p\n", lock, res);

                dlm_lock_attach_lockres(lock, res);
                lock->ast = ast;
                lock->bast = bast;
                lock->astdata = data;

retry_lock:
                if (flags & LKM_VALBLK) {
                        mlog(0, "LKM_VALBLK passed by caller\n");

                        /* LVB requests for non PR, PW or EX locks are
                         * ignored. */
                        if (mode < LKM_PRMODE)
                                flags &= ~LKM_VALBLK;
                        else {
                                flags |= LKM_GET_LVB;
                                lock->lksb->flags |= DLM_LKSB_GET_LVB;
                        }
                }

                if (res->owner == dlm->node_num)
                        status = dlmlock_master(dlm, res, lock, flags);
                else
                        status = dlmlock_remote(dlm, res, lock, flags);

                if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
                    status == DLM_FORWARD) {
                        msleep(100);
                        if (recovery) {
                                if (status != DLM_RECOVERING)
                                        goto retry_lock;
                                /* wait to see the node go down, then
                                 * drop down and allow the lockres to
                                 * get cleaned up.  need to remaster. */
                                dlm_wait_for_node_death(dlm, res->owner,
                                                DLM_NODE_DEATH_WAIT_MAX);
                        } else {
                                dlm_wait_for_recovery(dlm);
                                goto retry_lock;
                        }
                }

                /* Inflight taken in dlm_get_lock_resource() is dropped here */
                spin_lock(&res->spinlock);
                dlm_lockres_drop_inflight_ref(dlm, res);
                spin_unlock(&res->spinlock);

                dlm_lockres_calc_usage(dlm, res);
                dlm_kick_thread(dlm, res);

                if (status != DLM_NORMAL) {
                        lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
                        if (status != DLM_NOTQUEUED)
                                dlm_error(status);
                        goto error;
                }
        }

error:
        if (status != DLM_NORMAL) {
                if (lock && !convert)
                        dlm_lock_put(lock);
                // this is kind of unnecessary
                lksb->status = status;
        }

        /* put lockres ref from the convert path
         * or from dlm_get_lock_resource */
        if (res)
                dlm_lockres_put(res);

        return status;
}
EXPORT_SYMBOL_GPL(dlmlock);