^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) ** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) ******************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) /* Central locking logic has four stages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) dlm_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) dlm_unlock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) request_lock(ls, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) convert_lock(ls, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) unlock_lock(ls, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) cancel_lock(ls, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) _request_lock(r, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) _convert_lock(r, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) _unlock_lock(r, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) _cancel_lock(r, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) do_request(r, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) do_convert(r, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) do_unlock(r, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) do_cancel(r, lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) Stage 1 (lock, unlock) is mainly about checking input args and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) splitting into one of the four main operations:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) dlm_lock = request_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) dlm_lock+CONVERT = convert_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) dlm_unlock = unlock_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) dlm_unlock+CANCEL = cancel_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) Stage 2, xxxx_lock(), just finds and locks the relevant rsb, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) then passed to the next stage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) Stage 3, _xxxx_lock(), determines if the operation is local or remote.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) When remote, it calls send_xxxx(); when local, it calls do_xxxx().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) Stage 4, do_xxxx(), is the guts of the operation. It manipulates the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) given rsb and lkb and queues callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) For remote operations, send_xxxx() results in the corresponding do_xxxx()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) function being executed on the remote node. The connecting send/receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) calls on local (L) and remote (R) nodes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) L: send_xxxx() -> R: receive_xxxx()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) R: do_xxxx()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) L: receive_xxxx_reply() <- R: send_xxxx_reply()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) */
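
/*
 * A rough sketch of how the public entry points select among the four
 * operations above (argument lists abbreviated; see include/linux/dlm.h
 * for the exact prototypes):
 *
 *	dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, name, namelen, ...);
 *						-> request_lock()
 *	dlm_lock(ls, DLM_LOCK_PR, &lksb, DLM_LKF_CONVERT, ...);
 *						-> convert_lock()
 *	dlm_unlock(ls, lkid, 0, &lksb, astarg);
 *						-> unlock_lock()
 *	dlm_unlock(ls, lkid, DLM_LKF_CANCEL, &lksb, astarg);
 *						-> cancel_lock()
 */
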
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #include <linux/rbtree.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #include "dlm_internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #include <linux/dlm_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #include "memory.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #include "lowcomms.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #include "requestqueue.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #include "util.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #include "dir.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #include "member.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #include "lockspace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #include "ast.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #include "lock.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #include "rcom.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #include "recover.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #include "lvb_table.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #include "user.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #include "config.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) static int send_remove(struct dlm_rsb *r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) struct dlm_message *ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) static int receive_extralen(struct dlm_message *ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) static void del_timeout(struct dlm_lkb *lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) static void toss_rsb(struct kref *kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * Lock compatibility matrix - thanks Steve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * UN = Unlocked state. Not really a state, used as a flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * PD = Padding. Used to make the matrix a nice power of two in size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * Other states are the same as the VMS DLM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) static const int __dlm_compat_matrix[8][8] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /* UN NL CR CW PR PW EX PD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {1, 1, 1, 1, 1, 1, 1, 0}, /* UN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) {1, 1, 1, 1, 1, 1, 1, 0}, /* NL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) {1, 1, 1, 1, 1, 1, 0, 0}, /* CR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {1, 1, 1, 1, 0, 0, 0, 0}, /* CW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) {1, 1, 1, 0, 1, 0, 0, 0}, /* PR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) {1, 1, 1, 0, 0, 0, 0, 0}, /* PW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) {1, 1, 0, 0, 0, 0, 0, 0}, /* EX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * This defines the direction of transfer of LVB data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * Granted mode is the row; requested mode is the column.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * Usage: matrix[grmode+1][rqmode+1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * 1 = LVB is returned to the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * 0 = LVB is written to the resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * -1 = nothing happens to the LVB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) const int dlm_lvb_operations[8][8] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) /* UN NL CR CW PR PW EX PD*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) { -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) { -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) { -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) { -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) { -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) { -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) { -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) { -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) };
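
/*
 * Example (reading the matrix above): converting down from EX to NL
 * writes the caller's LVB to the resource (0), while converting up
 * from NL to EX returns the resource's LVB to the caller (1).
 */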
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define modes_compat(gr, rq) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) int dlm_modes_compat(int mode1, int mode2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) }
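
/*
 * Example: per the compatibility matrix above, two PR (protected read)
 * locks are compatible with each other, while PR and EX are not:
 *
 *	dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR) == 1
 *	dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX) == 0
 */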
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * Compatibility matrix for conversions with QUECVT set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * Granted mode is the row; requested mode is the column.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * Usage: matrix[grmode+1][rqmode+1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) static const int __quecvt_compat_matrix[8][8] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) /* UN NL CR CW PR PW EX PD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) {0, 0, 0, 0, 0, 0, 0, 0}, /* UN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) {0, 0, 1, 1, 1, 1, 1, 0}, /* NL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) {0, 0, 0, 1, 1, 1, 1, 0}, /* CR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) {0, 0, 0, 0, 1, 1, 1, 0}, /* CW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {0, 0, 0, 1, 0, 1, 1, 0}, /* PR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) {0, 0, 0, 0, 0, 0, 1, 0}, /* PW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) {0, 0, 0, 0, 0, 0, 0, 0}, /* EX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) };
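
/*
 * Example (reading the matrix above): with QUECVT set, a conversion
 * from PR to CW, PW or EX is flagged compatible (1), while a conversion
 * from PR to NL or CR is not (0).
 */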
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) void dlm_print_lkb(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) (unsigned long long)lkb->lkb_recover_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) static void dlm_print_rsb(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) "rlc %d name %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) void dlm_dump_rsb(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) dlm_print_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) printk(KERN_ERR "rsb lookup list\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) dlm_print_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) printk(KERN_ERR "rsb grant queue:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) dlm_print_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) printk(KERN_ERR "rsb convert queue:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) dlm_print_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) printk(KERN_ERR "rsb wait queue:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) dlm_print_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) /* Threads cannot use the lockspace while it's being recovered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) static inline void dlm_lock_recovery(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) down_read(&ls->ls_in_recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) void dlm_unlock_recovery(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) up_read(&ls->ls_in_recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) int dlm_lock_recovery_try(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) return down_read_trylock(&ls->ls_in_recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) static inline int can_be_queued(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) static inline int force_blocking_asts(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) static inline int is_demoted(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) static inline int is_altmode(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static inline int is_granted(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) return (lkb->lkb_status == DLM_LKSTS_GRANTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
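/*
 * Convention used below: res_nodeid == 0 means this node is the master,
 * a positive value is the master's nodeid, and -1 means the master is
 * not yet known (see find_rsb_dir()/find_rsb_nodir() below).
 */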
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) static inline int is_remote(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) return !!r->res_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) static inline int is_process_copy(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) static inline int is_master_copy(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) static inline int middle_conversion(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) static inline int down_conversion(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) }
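
/*
 * Example: a PR <-> CW convert is the "middle" case: the two modes
 * exclude each other in the compatibility matrix above, and neither
 * permits a superset of what the other permits, so the convert is
 * treated as neither a pure up- nor a pure down-conversion.  EX -> NL,
 * by contrast, satisfies down_conversion().
 */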
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) static inline int is_overlap_unlock(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) static inline int is_overlap_cancel(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) static inline int is_overlap(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) DLM_IFL_OVERLAP_CANCEL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) if (is_master_copy(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) del_timeout(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) /* if the operation was a cancel, then return -DLM_ECANCEL; if a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) timeout caused the cancel, then return -ETIMEDOUT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) rv = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) rv = -EDEADLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) queue_cast(r, lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (is_master_copy(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) send_bast(r, lkb, rqmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) * Basic operations on rsb's and lkb's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) /* This is only called to add a reference when the code already holds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) a valid reference to the rsb, so there's no need for locking. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) static inline void hold_rsb(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) kref_get(&r->res_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) void dlm_hold_rsb(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) /* When all references to the rsb are gone, it's transferred to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) the toss list for later disposal. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) static void put_rsb(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) struct dlm_ls *ls = r->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) uint32_t bucket = r->res_bucket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) spin_lock(&ls->ls_rsbtbl[bucket].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) kref_put(&r->res_ref, toss_rsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) spin_unlock(&ls->ls_rsbtbl[bucket].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) void dlm_put_rsb(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) static int pre_rsb_struct(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) struct dlm_rsb *r1, *r2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) spin_lock(&ls->ls_new_rsb_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) spin_unlock(&ls->ls_new_rsb_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) spin_unlock(&ls->ls_new_rsb_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) r1 = dlm_allocate_rsb(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) r2 = dlm_allocate_rsb(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) spin_lock(&ls->ls_new_rsb_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) if (r1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) list_add(&r1->res_hashchain, &ls->ls_new_rsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) ls->ls_new_rsb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) if (r2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) list_add(&r2->res_hashchain, &ls->ls_new_rsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) ls->ls_new_rsb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) count = ls->ls_new_rsb_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) spin_unlock(&ls->ls_new_rsb_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) unlock any spinlocks, go back and call pre_rsb_struct again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) Otherwise, take an rsb off the list and return it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) struct dlm_rsb **r_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) spin_lock(&ls->ls_new_rsb_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) if (list_empty(&ls->ls_new_rsb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) count = ls->ls_new_rsb_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) spin_unlock(&ls->ls_new_rsb_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) log_debug(ls, "find_rsb retry %d %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) count, dlm_config.ci_new_rsb_count, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) list_del(&r->res_hashchain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) /* Convert the empty list_head to a NULL rb_node for tree usage: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) memset(&r->res_hashnode, 0, sizeof(struct rb_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) ls->ls_new_rsb_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) spin_unlock(&ls->ls_new_rsb_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) r->res_ls = ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) r->res_length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) memcpy(r->res_name, name, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) mutex_init(&r->res_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) INIT_LIST_HEAD(&r->res_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) INIT_LIST_HEAD(&r->res_grantqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) INIT_LIST_HEAD(&r->res_convertqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) INIT_LIST_HEAD(&r->res_waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) INIT_LIST_HEAD(&r->res_root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) INIT_LIST_HEAD(&r->res_recover_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) *r_ret = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) }
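
/*
 * Typical caller pattern, sketched from find_rsb_dir() below:
 *
 *  retry:
 *	error = pre_rsb_struct(ls);         (preallocate, no locks held)
 *	...
 *	spin_lock(&ls->ls_rsbtbl[b].lock);
 *	...
 *	error = get_rsb_struct(ls, name, len, &r);
 *	if (error == -EAGAIN) {             (preallocated rsbs ran out)
 *		spin_unlock(&ls->ls_rsbtbl[b].lock);
 *		goto retry;
 *	}
 */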
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) char maxname[DLM_RESNAME_MAXLEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) memset(maxname, 0, DLM_RESNAME_MAXLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) memcpy(maxname, name, nlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) struct dlm_rsb **r_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) struct rb_node *node = tree->rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) r = rb_entry(node, struct dlm_rsb, res_hashnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) rc = rsb_cmp(r, name, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) node = node->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) else if (rc > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) node = node->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) *r_ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) return -EBADR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) *r_ret = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) struct rb_node **newn = &tree->rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) struct rb_node *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) while (*newn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) res_hashnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) parent = *newn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) newn = &parent->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) else if (rc > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) newn = &parent->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) log_print("rsb_insert match");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) dlm_dump_rsb(rsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) dlm_dump_rsb(cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) rb_link_node(&rsb->res_hashnode, parent, newn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) rb_insert_color(&rsb->res_hashnode, tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) * Find rsb in rsbtbl and potentially create/add one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * Delaying the release of rsb's has a similar benefit to applications keeping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * NL locks on an rsb, but without the guarantee that the cached master value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * will still be valid when the rsb is reused. Apps aren't always smart enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) * to keep NL locks on an rsb that they may lock again shortly; this can lead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * to excessive master lookups and removals if we don't delay the release.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * Searching for an rsb means looking through both the normal list and toss
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * list. When found on the toss list, the rsb is moved to the normal list with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * a ref count of 1; when found on the normal list, the ref count is incremented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * rsb's on the keep list are being used locally and refcounted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * rsb's on the toss list are not being used locally, and are not refcounted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) * The toss list rsb's were either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) * - previously used locally but not any more (were on keep list, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * moved to toss list when last refcount dropped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) * - created and put on toss list as a directory record for a lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * (we are the dir node for the res and are not using the res right now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * but some other node is)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * The purpose of find_rsb() is to return a refcounted rsb for local use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) * So, if the given rsb is on the toss list, it is moved to the keep list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * before being returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * toss_rsb() happens when all local usage of the rsb is done, i.e. no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) * more refcounts exist, so the rsb is moved from the keep list to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) * toss list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * rsb's on both keep and toss lists are used for doing name to master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * lookups. rsb's that are in use locally (and being refcounted) are on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * the keep list, rsb's that are not in use locally (not refcounted) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * only exist for name/master lookups are on the toss list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) * rsb's on the toss list whose dir_nodeid is not local can have stale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * name/master mappings. So, remote requests on such rsb's can potentially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) * return with an error, which means the mapping is stale and needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) * be updated with a new lookup. (The idea behind MASTER UNCERTAIN and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * first_lkid is to keep only a single outstanding request on an rsb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * while that rsb has a potentially stale master.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) */
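
/*
 * Rough rsb lifecycle implied by the above:
 *
 *   find_rsb() (not found)       created on keep list, res_ref = 1
 *   hold_rsb() / put_rsb()       res_ref++ / res_ref--
 *   last put_rsb()               toss_rsb(): moved keep -> toss,
 *                                kept unrefcounted as a cache
 *   find_rsb() (found on toss)   moved toss -> keep, res_ref = 1 again
 *   (otherwise the toss entry is eventually removed/freed)
 */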
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) uint32_t hash, uint32_t b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) int dir_nodeid, int from_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) unsigned int flags, struct dlm_rsb **r_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) struct dlm_rsb *r = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) int our_nodeid = dlm_our_nodeid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) int from_local = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) int from_other = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) int from_dir = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) int create = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) if (flags & R_RECEIVE_REQUEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) if (from_nodeid == dir_nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) from_dir = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) from_other = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) } else if (flags & R_REQUEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) from_local = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) * from_nodeid has sent us a lock in dlm_recover_locks, believing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) * we're the new master. Our local recovery may not have set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) * res_master_nodeid to our_nodeid yet, so allow either. Don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * create the rsb; dlm_recover_process_copy() will handle EBADR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) * by resending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) * If someone sends us a request, we are the dir node, and we do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) * not find the rsb anywhere, then recreate it. This happens if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) * someone sends us a request after we have removed/freed an rsb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) * from our toss list. (They sent a request instead of lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) * because they are using an rsb from their toss list.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) if (from_local || from_dir ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) (from_other && (dir_nodeid == our_nodeid))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) create = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) if (create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) error = pre_rsb_struct(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) spin_lock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) goto do_toss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) * rsb is active, so we can't check master_nodeid without lock_rsb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) kref_get(&r->res_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) do_toss:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) goto do_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) * rsb found inactive (master_nodeid may be out of date unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) * we are the dir_nodeid or were the master). No other thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) * is using this rsb because it's on the toss list, so we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) * look at or update res_master_nodeid without lock_rsb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if ((r->res_master_nodeid != our_nodeid) && from_other) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) /* our rsb was not master, and another node (not the dir node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) has sent us a request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) from_nodeid, r->res_master_nodeid, dir_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) error = -ENOTBLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if ((r->res_master_nodeid != our_nodeid) && from_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) /* don't think this should ever happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) log_error(ls, "find_rsb toss from_dir %d master %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) from_nodeid, r->res_master_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) dlm_print_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) /* fix it and go on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) r->res_master_nodeid = our_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) r->res_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) r->res_first_lkid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (from_local && (r->res_master_nodeid != our_nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) /* Because we have held no locks on this rsb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) res_master_nodeid could have become stale. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) r->res_first_lkid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) do_new:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * rsb not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) if (error == -EBADR && !create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) error = get_rsb_struct(ls, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (error == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) r->res_hash = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) r->res_bucket = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) r->res_dir_nodeid = dir_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) kref_init(&r->res_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (from_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) /* want to see how often this happens */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) log_debug(ls, "find_rsb new from_dir %d recreate %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) from_nodeid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) r->res_master_nodeid = our_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) r->res_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) goto out_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (from_other && (dir_nodeid != our_nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) from_nodeid, dir_nodeid, our_nodeid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) dlm_free_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) r = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) error = -ENOTBLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (from_other) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) log_debug(ls, "find_rsb new from_other %d dir %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) from_nodeid, dir_nodeid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) if (dir_nodeid == our_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) /* When we are the dir nodeid, we can set the master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) node immediately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) r->res_master_nodeid = our_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) r->res_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) /* set_master will send_lookup to dir_nodeid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) r->res_master_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) r->res_nodeid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) out_add:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) *r_ret = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) /* During recovery, other nodes can send us new MSTCPY locks (from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) dlm_recover_locks) before we've made ourselves the master (in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) dlm_recover_masters). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) uint32_t hash, uint32_t b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) int dir_nodeid, int from_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) unsigned int flags, struct dlm_rsb **r_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) struct dlm_rsb *r = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) int our_nodeid = dlm_our_nodeid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) int recover = (flags & R_RECEIVE_RECOVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) error = pre_rsb_struct(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) spin_lock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) goto do_toss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * rsb is active, so we can't check master_nodeid without lock_rsb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) kref_get(&r->res_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) do_toss:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) goto do_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * rsb found inactive. No other thread is using this rsb because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * it's on the toss list, so we can look at or update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * res_master_nodeid without lock_rsb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /* our rsb is not master, and another node has sent us a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) request; this should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) from_nodeid, r->res_master_nodeid, dir_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) dlm_print_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) error = -ENOTBLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (!recover && (r->res_master_nodeid != our_nodeid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) (dir_nodeid == our_nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* our rsb is not master, and we are the dir node; may as well fix it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) this should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) log_error(ls, "find_rsb toss our %d master %d dir %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) our_nodeid, r->res_master_nodeid, dir_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) dlm_print_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) r->res_master_nodeid = our_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) r->res_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) do_new:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * rsb not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) error = get_rsb_struct(ls, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (error == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) r->res_hash = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) r->res_bucket = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) r->res_dir_nodeid = dir_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) r->res_master_nodeid = dir_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) kref_init(&r->res_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) *r_ret = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
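/* find_rsb() hashes the resource name to select the rsb table bucket and the
   directory nodeid, then dispatches to find_rsb_dir() or find_rsb_nodir()
   depending on whether the lockspace uses a resource directory. */
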
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) unsigned int flags, struct dlm_rsb **r_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) uint32_t hash, b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) int dir_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (len > DLM_RESNAME_MAXLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) hash = jhash(name, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) b = hash & (ls->ls_rsbtbl_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) dir_nodeid = dlm_hash2nodeid(ls, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (dlm_no_directory(ls))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) from_nodeid, flags, r_ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) from_nodeid, flags, r_ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
/* we have received a request and found that res_master_nodeid != our_nodeid,
   so we need to return an error or make ourselves the master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) int from_nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (dlm_no_directory(ls)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) from_nodeid, r->res_master_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) r->res_dir_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dlm_print_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return -ENOTBLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (from_nodeid != r->res_dir_nodeid) {
		/* our rsb is not master, and another node (not the dir node)
		   has sent us a request. This is much more common when our
		   res_master_nodeid is zero, so limit the debug output to the
		   non-zero case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (r->res_master_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) log_debug(ls, "validate master from_other %d master %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) "dir %d first %x %s", from_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) r->res_master_nodeid, r->res_dir_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) r->res_first_lkid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return -ENOTBLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* our rsb is not master, but the dir nodeid has sent us a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) request; this could happen with master 0 / res_nodeid -1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (r->res_master_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) log_error(ls, "validate master from_dir %d master %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) "first %x %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) from_nodeid, r->res_master_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) r->res_first_lkid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) r->res_master_nodeid = dlm_our_nodeid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) r->res_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * We're the dir node for this res and another node wants to know the
 * master nodeid. During normal operation (non-recovery) this is only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * called from receive_lookup(); master lookups when the local node is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * the dir node are done by find_rsb().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * normal operation, we are the dir node for a resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * . _request_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * . set_master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * . send_lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * . receive_lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * . dlm_master_lookup flags 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * recover directory, we are rebuilding dir for all resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * . dlm_recover_directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * . dlm_rcom_names
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * remote node sends back the rsb names it is master of and we are dir of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * we either create new rsb setting remote node as master, or find existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * rsb and set master to be the remote node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * recover masters, we are finding the new master for resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * . dlm_recover_masters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * . recover_master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * . dlm_send_rcom_lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * . receive_rcom_lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) unsigned int flags, int *r_nodeid, int *result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct dlm_rsb *r = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) uint32_t hash, b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) int from_master = (flags & DLM_LU_RECOVER_DIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) int fix_master = (flags & DLM_LU_RECOVER_MASTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) int our_nodeid = dlm_our_nodeid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) int dir_nodeid, error, toss_list = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (len > DLM_RESNAME_MAXLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (from_nodeid == our_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) our_nodeid, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) hash = jhash(name, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) b = hash & (ls->ls_rsbtbl_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) dir_nodeid = dlm_hash2nodeid(ls, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (dir_nodeid != our_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) from_nodeid, dir_nodeid, our_nodeid, hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) ls->ls_num_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) *r_nodeid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) error = pre_rsb_struct(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) spin_lock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (!error) {
		/* because the rsb is active, we need to lock_rsb before
		   checking/changing res_master_nodeid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) goto not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
	/* because the rsb is inactive (on the toss list), it's not refcounted
	   and lock_rsb is not used, but it is protected by the rsbtbl lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) toss_list = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (r->res_dir_nodeid != our_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* should not happen, but may as well fix it and carry on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) r->res_dir_nodeid, our_nodeid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) r->res_dir_nodeid = our_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
		/* Recovery uses this function to set a new master when
		   the previous master failed.  Setting NEW_MASTER will
		   force dlm_recover_masters to call recover_master on this
		   rsb even though res_master_nodeid no longer refers to a
		   removed node. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) r->res_master_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) r->res_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) rsb_set_flag(r, RSB_NEW_MASTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (toss_list) {
			/* we should never find the rsb on the toss list here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) log_error(ls, "dlm_master_lookup fix_master on toss");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) dlm_dump_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (from_master && (r->res_master_nodeid != from_nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /* this will happen if from_nodeid became master during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) a previous recovery cycle, and we aborted the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) cycle before recovering this master value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) log_limit(ls, "dlm_master_lookup from_master %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) "master_nodeid %d res_nodeid %d first %x %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) from_nodeid, r->res_master_nodeid, r->res_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) r->res_first_lkid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (r->res_master_nodeid == our_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) log_error(ls, "from_master %d our_master", from_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) dlm_dump_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) goto out_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) r->res_master_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) r->res_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) rsb_set_flag(r, RSB_NEW_MASTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (!r->res_master_nodeid) {
		/* this will happen if recovery runs while we're looking
		   up the master for this rsb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) from_nodeid, r->res_first_lkid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) r->res_master_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) r->res_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (!from_master && !fix_master &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) (r->res_master_nodeid == from_nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /* this can happen when the master sends remove, the dir node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) finds the rsb on the keep list and ignores the remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) and the former master sends a lookup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) log_limit(ls, "dlm_master_lookup from master %d flags %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) "first %x %s", from_nodeid, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) r->res_first_lkid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) out_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) *r_nodeid = r->res_master_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) *result = DLM_LU_MATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (toss_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) r->res_toss_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /* the rsb was inactive (on toss list) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /* the rsb was active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) not_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) error = get_rsb_struct(ls, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (error == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) r->res_hash = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) r->res_bucket = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) r->res_dir_nodeid = our_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) r->res_master_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) r->res_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) kref_init(&r->res_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) r->res_toss_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
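	/* this rsb exists only to record the master for the directory; no
	   locks are held on it locally, so it goes straight onto the toss
	   (inactive) list */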
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) dlm_free_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) *result = DLM_LU_ADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) *r_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
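/* debugging helpers: dump every active rsb whose name hashes to the given
   value, or dump the rsb (active or tossed) with the given name */
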
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) for (i = 0; i < ls->ls_rsbtbl_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) spin_lock(&ls->ls_rsbtbl[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) r = rb_entry(n, struct dlm_rsb, res_hashnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (r->res_hash == hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) dlm_dump_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) spin_unlock(&ls->ls_rsbtbl[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct dlm_rsb *r = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) uint32_t hash, b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) hash = jhash(name, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) b = hash & (ls->ls_rsbtbl_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) spin_lock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) goto out_dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) out_dump:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) dlm_dump_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
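/* kref release function: called when the last reference on an active rsb is
   dropped. The rsb is moved from the keep tree to the toss (inactive) tree,
   its refcount is re-initialized so it can be reactivated later, the toss
   time is recorded and the bucket is flagged for the shrink scan, and the
   lvb is released. */
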
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static void toss_rsb(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct dlm_ls *ls = r->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) kref_init(&r->res_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) r->res_toss_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (r->res_lvbptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dlm_free_lvb(r->res_lvbptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) r->res_lvbptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* See comment for unhold_lkb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static void unhold_rsb(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) rv = kref_put(&r->res_ref, toss_rsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) DLM_ASSERT(!rv, dlm_dump_rsb(r););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static void kill_rsb(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
	/* All work is done after the return from kref_put() so we
	   can release the rsbtbl lock before the remove and free. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* Attaching/detaching lkb's from rsb's is for rsb reference counting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) The rsb must exist as long as any lkb's for it do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) lkb->lkb_resource = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static void detach_lkb(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (lkb->lkb_resource) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) put_rsb(lkb->lkb_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) lkb->lkb_resource = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) lkb = dlm_allocate_lkb(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (!lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) lkb->lkb_nodeid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) lkb->lkb_grmode = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) kref_init(&lkb->lkb_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) INIT_LIST_HEAD(&lkb->lkb_ownqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) INIT_LIST_HEAD(&lkb->lkb_time_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) INIT_LIST_HEAD(&lkb->lkb_cb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) mutex_init(&lkb->lkb_cb_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
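	/* allocate a lockspace-wide unique lock id; allocation starts at 1 so
	   that 0 is never a valid lkid */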
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) idr_preload(GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) spin_lock(&ls->ls_lkbidr_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (rv >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) lkb->lkb_id = rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) spin_unlock(&ls->ls_lkbidr_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) idr_preload_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (rv < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) log_error(ls, "create_lkb idr error %d", rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) dlm_free_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) *lkb_ret = lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
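/* find_lkb() takes a reference on the lkb it returns, so callers must drop
   it with dlm_put_lkb() (or __put_lkb()) when done. A typical pattern,
   sketched from the receive paths in this file:

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		return error;
	... use lkb ...
	dlm_put_lkb(lkb);
*/
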
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) spin_lock(&ls->ls_lkbidr_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) lkb = idr_find(&ls->ls_lkbidr, lkid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) kref_get(&lkb->lkb_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) spin_unlock(&ls->ls_lkbidr_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) *lkb_ret = lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return lkb ? 0 : -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static void kill_lkb(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
	/* All work is done after the return from kref_put() so we
	   can release the lkbidr spinlock before the detach_lkb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
/* __put_lkb() is used when an lkb may not have an rsb attached to it, so we
   need to provide the lockspace explicitly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) uint32_t lkid = lkb->lkb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) spin_lock(&ls->ls_lkbidr_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (kref_put(&lkb->lkb_ref, kill_lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) idr_remove(&ls->ls_lkbidr, lkid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) spin_unlock(&ls->ls_lkbidr_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) detach_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* for local/process lkbs, lvbptr points to caller's lksb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (lkb->lkb_lvbptr && is_master_copy(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) dlm_free_lvb(lkb->lkb_lvbptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) dlm_free_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) spin_unlock(&ls->ls_lkbidr_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) int dlm_put_lkb(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct dlm_ls *ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) ls = lkb->lkb_resource->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return __put_lkb(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* This is only called to add a reference when the code already holds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) a valid reference to the lkb, so there's no need for locking. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static inline void hold_lkb(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) kref_get(&lkb->lkb_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
/* This is called when we need to remove a reference and are certain
   it's not the last ref, e.g. del_lkb is always called between a
   find_lkb/put_lkb pair and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) static inline void unhold_lkb(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) rv = kref_put(&lkb->lkb_ref, kill_lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) DLM_ASSERT(!rv, dlm_print_lkb(lkb););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static void lkb_add_ordered(struct list_head *new, struct list_head *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) struct dlm_lkb *lkb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) list_for_each_entry(lkb, head, lkb_statequeue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (lkb->lkb_rqmode < mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
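	/* if no entry on the queue satisfies the mode check (including the
	   empty-queue case), the loop leaves lkb aliasing the list head, so
	   this insert degenerates to adding at the tail */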
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
/* add/remove an lkb to/from an rsb's grant/convert/wait queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) kref_get(&lkb->lkb_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) lkb->lkb_timestamp = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) lkb->lkb_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) case DLM_LKSTS_WAITING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) case DLM_LKSTS_GRANTED:
		/* convention says granted locks are kept in order of grmode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) lkb->lkb_grmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) case DLM_LKSTS_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) list_add_tail(&lkb->lkb_statequeue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) &r->res_convertqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) lkb->lkb_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) list_del(&lkb->lkb_statequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) unhold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) hold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) del_lkb(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) add_lkb(r, lkb, sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) unhold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static int msg_reply_type(int mstype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) switch (mstype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) case DLM_MSG_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return DLM_MSG_REQUEST_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) case DLM_MSG_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) return DLM_MSG_CONVERT_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) case DLM_MSG_UNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return DLM_MSG_UNLOCK_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) case DLM_MSG_CANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return DLM_MSG_CANCEL_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) case DLM_MSG_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) return DLM_MSG_LOOKUP_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
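/* remember which nodeids have already been warned about; returns 1 if this
   nodeid is already recorded in warned[], otherwise records it and returns 0 */
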
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static int nodeid_warned(int nodeid, int num_nodes, int *warned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) for (i = 0; i < num_nodes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (!warned[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) warned[i] = nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (warned[i] == nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
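/* scan the waiters list and warn, at most once per remote node, about lkbs
   that have been waiting longer than ci_waitwarn_us for a reply */
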
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) void dlm_scan_waiters(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) s64 us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) s64 debug_maxus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) u32 debug_scanned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) u32 debug_expired = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) int num_nodes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) int *warned = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (!dlm_config.ci_waitwarn_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) mutex_lock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (!lkb->lkb_wait_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) debug_scanned++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (us < dlm_config.ci_waitwarn_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) lkb->lkb_wait_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) debug_expired++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (us > debug_maxus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) debug_maxus = us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (!num_nodes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) num_nodes = ls->ls_num_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) warned = kcalloc(num_nodes, sizeof(int), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (!warned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) log_error(ls, "waitwarn %x %lld %d us check connection to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) "node %d", lkb->lkb_id, (long long)us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) mutex_unlock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) kfree(warned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (debug_expired)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) debug_scanned, debug_expired,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) dlm_config.ci_waitwarn_us, (long long)debug_maxus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
/* add/remove an lkb to/from the global waiters list of lkbs waiting for
   a reply from a remote node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct dlm_ls *ls = lkb->lkb_resource->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) mutex_lock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (is_overlap_unlock(lkb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
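	/* an unlock or cancel issued while another operation is still waiting
	   for a reply is recorded as an "overlap" on the existing waiter
	   entry instead of being added to the waiters list separately */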
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) switch (mstype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) case DLM_MSG_UNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) case DLM_MSG_CANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) lkb->lkb_wait_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) hold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) lkb->lkb_id, lkb->lkb_wait_type, mstype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) lkb->lkb_wait_count, lkb->lkb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) DLM_ASSERT(!lkb->lkb_wait_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) dlm_print_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) printk("wait_count %d\n", lkb->lkb_wait_count););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) lkb->lkb_wait_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) lkb->lkb_wait_type = mstype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) lkb->lkb_wait_time = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) hold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) log_error(ls, "addwait error %x %d flags %x %d %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) lkb->lkb_id, error, lkb->lkb_flags, mstype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) lkb->lkb_wait_type, lkb->lkb_resource->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) mutex_unlock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre(), which
   set RESEND, and dlm_recover_waiters_post() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) struct dlm_ls *ls = lkb->lkb_resource->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) int overlap_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) overlap_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) goto out_del;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) overlap_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) goto out_del;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /* Cancel state was preemptively cleared by a successful convert,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) see next comment, nothing to do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if ((mstype == DLM_MSG_CANCEL_REPLY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) log_debug(ls, "remwait %x cancel_reply wait_type %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) lkb->lkb_id, lkb->lkb_wait_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
	/* Remove for the convert reply, and preemptively remove for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) cancel reply. A convert has been granted while there's still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) an outstanding cancel on it (the cancel is moot and the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) in the cancel reply should be 0). We preempt the cancel reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) because the app gets the convert result and then can follow up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) with another op, like convert. This subsequent op would see the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) lingering state of the cancel and fail with -EBUSY. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if ((mstype == DLM_MSG_CONVERT_REPLY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) is_overlap_cancel(lkb) && ms && !ms->m_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) lkb->lkb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) lkb->lkb_wait_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) lkb->lkb_wait_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) goto out_del;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) /* N.B. type of reply may not always correspond to type of original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) msg due to lookup->request optimization, verify others? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (lkb->lkb_wait_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) lkb->lkb_wait_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) goto out_del;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) mstype, lkb->lkb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) out_del:
	/* the force-unlock/cancel has completed and we haven't received a reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) to the op that was in progress prior to the unlock/cancel; we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) give up on any reply to the earlier op. FIXME: not sure when/how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) this would happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (overlap_done && lkb->lkb_wait_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) log_error(ls, "remwait error %x reply %d wait_type %d overlap",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) lkb->lkb_id, mstype, lkb->lkb_wait_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) lkb->lkb_wait_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) lkb->lkb_wait_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) lkb->lkb_flags &= ~DLM_IFL_RESEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) lkb->lkb_wait_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (!lkb->lkb_wait_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) list_del_init(&lkb->lkb_wait_reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) unhold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) struct dlm_ls *ls = lkb->lkb_resource->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) mutex_lock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) error = _remove_from_waiters(lkb, mstype, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) mutex_unlock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* Handles situations where we might be processing a "fake" or "stub" reply, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) which case we can't take waiters_mutex again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct dlm_ls *ls = lkb->lkb_resource->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (ms->m_flags != DLM_IFL_STUB_MS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) mutex_lock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) error = _remove_from_waiters(lkb, ms->m_type, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (ms->m_flags != DLM_IFL_STUB_MS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) mutex_unlock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
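
/* Note (illustrative sketch, not taken verbatim from the code): a "stub"
   reply is one this node fabricates locally, e.g. during recovery in
   dlm_recover_waiters_pre(), where ls_waiters_mutex is assumed to be held
   already by the caller.  Re-taking the mutex here would deadlock, so stub
   replies are flagged with DLM_IFL_STUB_MS in m_flags and the locking is
   skipped:

	caller holds ls_waiters_mutex
	  -> builds stub ms with m_flags = DLM_IFL_STUB_MS
	    -> _receive_xxxx_reply(lkb, ms_stub)
	      -> remove_from_waiters_ms()   (no re-lock taken here)
*/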
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /* If there's an rsb for the same resource being removed, ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) that the remove message is sent before the new lookup message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) It should be rare to need a delay here, but if not, then it may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) be worthwhile to add a proper wait mechanism rather than a delay. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static void wait_pending_remove(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct dlm_ls *ls = r->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) spin_lock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (ls->ls_remove_len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) log_debug(ls, "delay lookup for remove dir %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) r->res_dir_nodeid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) spin_unlock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) spin_unlock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
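
/* Illustrative interleaving (a sketch of the intended ordering, not code):
 *
 *   shrink_bucket():                     lookup path (another thread):
 *     set ls_remove_name = "RES1"
 *     send_remove(RES1)                    wait_pending_remove(RES1)
 *     clear ls_remove_name                   sees matching name, msleep(1)
 *                                            name cleared -> send lookup
 *
 * so the directory node sees the REMOVE before any new LOOKUP for the same
 * resource name from this node. */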
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * ls_remove_spin protects ls_remove_name and ls_remove_len which are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) * read by other threads in wait_pending_remove. ls_remove_names
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * and ls_remove_lens are only used by the scan thread, so they do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * not need protection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static void shrink_bucket(struct dlm_ls *ls, int b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) struct rb_node *n, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) int our_nodeid = dlm_our_nodeid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) int remote_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) int need_shrink = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) int i, len, rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) spin_lock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) next = rb_next(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) r = rb_entry(n, struct dlm_rsb, res_hashnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) /* If we're the directory record for this rsb, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) we're not the master of it, then we need to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) for the master node to send us a dir remove for it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) before removing the dir record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (!dlm_no_directory(ls) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) (r->res_master_nodeid != our_nodeid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) (dlm_dir_nodeid(r) == our_nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) need_shrink = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (!time_after_eq(jiffies, r->res_toss_time +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) dlm_config.ci_toss_secs * HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) if (!dlm_no_directory(ls) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) (r->res_master_nodeid == our_nodeid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) (dlm_dir_nodeid(r) != our_nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) /* We're the master of this rsb but we're not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) the directory record, so we need to tell the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) dir node to remove the dir record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) ls->ls_remove_lens[remote_count] = r->res_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) memcpy(ls->ls_remove_names[remote_count], r->res_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) DLM_RESNAME_MAXLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) remote_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (remote_count >= DLM_REMOVE_NAMES_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (!kref_put(&r->res_ref, kill_rsb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) log_error(ls, "tossed rsb in use %s", r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) dlm_free_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if (need_shrink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * While searching for rsb's to free, we found some that require
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) * remote removal. We leave them in place and find them again here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) * so there is a very small gap between removing them from the toss
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * list and sending the removal. Keeping this gap small is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * important to keep us (the master node) from being out of sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * with the remote dir node for very long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * From the time the rsb is removed from toss until just after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * send_remove, the rsb name is saved in ls_remove_name. A new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * lookup checks this to ensure that a new lookup message for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * same resource name is not sent just before the remove message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) for (i = 0; i < remote_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) name = ls->ls_remove_names[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) len = ls->ls_remove_lens[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) spin_lock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) log_debug(ls, "remove_name not toss %s", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (r->res_master_nodeid != our_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) log_debug(ls, "remove_name master %d dir %d our %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) r->res_master_nodeid, r->res_dir_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) our_nodeid, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (r->res_dir_nodeid == our_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) /* should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) log_error(ls, "remove_name dir %d master %d our %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) r->res_dir_nodeid, r->res_master_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) our_nodeid, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (!time_after_eq(jiffies, r->res_toss_time +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) dlm_config.ci_toss_secs * HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) log_debug(ls, "remove_name toss_time %lu now %lu %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) r->res_toss_time, jiffies, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (!kref_put(&r->res_ref, kill_rsb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) log_error(ls, "remove_name in use %s", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) /* block lookup of same name until we've sent remove */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) spin_lock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) ls->ls_remove_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) spin_unlock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) send_remove(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) /* allow lookup of name again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) spin_lock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) ls->ls_remove_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) spin_unlock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) dlm_free_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
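
/* Summary of the two passes above (descriptive only):
 *   pass 1: under the bucket lock, free expired rsb's that can be dropped
 *           locally and record (name, len) of those needing a remote
 *           directory removal in ls_remove_names/ls_remove_lens;
 *   pass 2: re-find each recorded name, re-check master/dir/toss_time,
 *           publish the name in ls_remove_name while send_remove() runs,
 *           then free the rsb.
 * The re-checks in pass 2 are needed because the bucket lock is dropped
 * between the passes and the rsb may have been looked up again meanwhile. */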
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) void dlm_scan_rsbs(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) for (i = 0; i < ls->ls_rsbtbl_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) shrink_bucket(ls, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (dlm_locking_stopped(ls))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
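
/* Typically driven periodically by the lockspace scan thread (dlm_scand),
   which calls dlm_scan_rsbs() for each lockspace; the exact call site lives
   in lockspace.c and is assumed here rather than shown. */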
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) static void add_timeout(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) struct dlm_ls *ls = lkb->lkb_resource->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (is_master_copy(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) goto add_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) goto add_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) add_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) mutex_lock(&ls->ls_timeout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) hold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) mutex_unlock(&ls->ls_timeout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
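
/* Illustrative example, assuming the timeout argument is supplied in
 * centiseconds: a request made with DLM_LKF_TIMEOUT and a 5 second timeout
 * arrives with lkb_timeout_cs == 500, so the lkb is placed on ls_timeout
 * here and dlm_scan_timeout() cancels it once it has waited about
 * 500 * 10000 us.  With only LSFL_TIMEWARN set on the lockspace, the lkb is
 * added for a one-time warning (DLM_IFL_WATCH_TIMEWARN) instead of a cancel. */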
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static void del_timeout(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct dlm_ls *ls = lkb->lkb_resource->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) mutex_lock(&ls->ls_timeout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (!list_empty(&lkb->lkb_time_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) list_del_init(&lkb->lkb_time_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) unhold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) mutex_unlock(&ls->ls_timeout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) /* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) lkb_timeout_cs without lock_rsb? Note: we can't lock timeout_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) and then lock rsb because of lock ordering in add_timeout. We may need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) to specify some special timeout-related bits in the lkb that are just to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) be accessed under the timeout_mutex. */
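
/* Unit note for the checks below: lkb_timeout_cs and ci_timewarn_cs are in
 * centiseconds while wait_us is in microseconds, hence the * 10000
 * (1 cs = 10 ms = 10,000 us).  E.g. timeout_cs = 500 -> 5,000,000 us = 5 s. */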
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) void dlm_scan_timeout(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) int do_cancel, do_warn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) s64 wait_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (dlm_locking_stopped(ls))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) do_cancel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) do_warn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) mutex_lock(&ls->ls_timeout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) wait_us = ktime_to_us(ktime_sub(ktime_get(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) lkb->lkb_timestamp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) wait_us >= (lkb->lkb_timeout_cs * 10000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) do_cancel = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) wait_us >= dlm_config.ci_timewarn_cs * 10000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) do_warn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (!do_cancel && !do_warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) hold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) mutex_unlock(&ls->ls_timeout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (!do_cancel && !do_warn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (do_warn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) /* clear flag so we only warn once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) del_timeout(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) dlm_timeout_warn(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (do_cancel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) log_debug(ls, "timeout cancel %x node %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) del_timeout(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) _cancel_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) unhold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) /* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) dlm_recoverd before checking/setting ls_recover_begin. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) void dlm_adjust_timeouts(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) ls->ls_recover_begin = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) mutex_lock(&ls->ls_timeout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) mutex_unlock(&ls->ls_timeout_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (!dlm_config.ci_waitwarn_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) mutex_lock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (ktime_to_us(lkb->lkb_wait_time))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) lkb->lkb_wait_time = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) mutex_unlock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /* lkb is master or local copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) int b, len = r->res_ls->ls_lvblen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /* b=1 lvb returned to caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) b=0 lvb written to rsb or invalidated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) b=-1 do nothing */
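
/* dlm_lvb_operations is indexed with +1 because lock modes start at
   DLM_LOCK_IV == -1.  Roughly (a simplified reading of the table, not an
   exact copy of it): acquiring a new lock at CR or above returns the rsb's
   lvb to the caller (b == 1), while converting down from PW/EX writes the
   caller's lvb into the rsb (b == 0); other combinations leave the lvb
   alone (b == -1). */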
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (b == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (!lkb->lkb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (!r->res_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) lkb->lkb_lvbseq = r->res_lvbseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) } else if (b == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) rsb_set_flag(r, RSB_VALNOTVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (!lkb->lkb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) if (!r->res_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (!r->res_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) r->res_lvbseq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) lkb->lkb_lvbseq = r->res_lvbseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) rsb_clear_flag(r, RSB_VALNOTVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (rsb_flag(r, RSB_VALNOTVALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (lkb->lkb_grmode < DLM_LOCK_PW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) rsb_set_flag(r, RSB_VALNOTVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (!lkb->lkb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (!r->res_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (!r->res_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) r->res_lvbseq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) rsb_clear_flag(r, RSB_VALNOTVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) /* lkb is process copy (pc) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) int b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (!lkb->lkb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) if (b == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) int len = receive_extralen(ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) if (len > r->res_ls->ls_lvblen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) len = r->res_ls->ls_lvblen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) lkb->lkb_lvbseq = ms->m_lvbseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) /* Manipulate lkb's on rsb's convert/granted/waiting queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) remove_lock -- used for unlock, removes lkb from granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) revert_lock -- used for cancel, moves lkb from convert to granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) grant_lock -- used for request and convert, adds lkb to granted or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) moves lkb from convert or waiting to granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) Each of these is used for master or local copy lkb's. There is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) also a _pc() variation used to make the corresponding change on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) a process copy (pc) lkb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) del_lkb(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) lkb->lkb_grmode = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /* this unhold undoes the original ref from create_lkb()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) so this leads to the lkb being freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) unhold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) set_lvb_unlock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) _remove_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) _remove_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) /* returns: 0 did nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 1 moved lock to granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) -1 removed lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) lkb->lkb_rqmode = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) switch (lkb->lkb_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) case DLM_LKSTS_GRANTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) case DLM_LKSTS_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) move_lkb(r, lkb, DLM_LKSTS_GRANTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) case DLM_LKSTS_WAITING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) del_lkb(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) lkb->lkb_grmode = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /* this unhold undoes the original ref from create_lkb()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) so this leads to the lkb being freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) unhold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) rv = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) log_print("invalid status for revert %d", lkb->lkb_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) return revert_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) if (lkb->lkb_grmode != lkb->lkb_rqmode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) lkb->lkb_grmode = lkb->lkb_rqmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (lkb->lkb_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) move_lkb(r, lkb, DLM_LKSTS_GRANTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) add_lkb(r, lkb, DLM_LKSTS_GRANTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) lkb->lkb_rqmode = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) lkb->lkb_highbast = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) set_lvb_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) _grant_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) set_lvb_lock_pc(r, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) _grant_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) /* called by grant_pending_locks() which means an async grant message must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) be sent to the requesting node in addition to granting the lock if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) lkb belongs to a remote node. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) grant_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) if (is_master_copy(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) send_grant(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) queue_cast(r, lkb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) /* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) change the granted/requested modes. We're munging things accordingly in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) the process copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) CONVDEADLK: our grmode may have been forced down to NL to resolve a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) conversion deadlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) compatible with other granted locks */
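
/* Worked examples (illustrative, based on the flag semantics above):
 *
 *   CONVDEADLK: we asked to convert PR->EX with DLM_LKF_CONVDEADLK set and
 *   the master resolved a conversion deadlock by demoting our granted mode
 *   to NL, reporting DLM_SBF_DEMOTED; munge_demoted() below makes the
 *   process copy's grmode match (NL) before the grant is applied.
 *
 *   ALTPR/ALTCW: we requested EX with DLM_LKF_ALTPR and the master granted
 *   the alternate mode instead, reporting DLM_SBF_ALTMODE; munge_altmode()
 *   rewrites rqmode to PR (or CW for ALTCW) so grant_lock_pc() records the
 *   mode that was actually granted. */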
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) static void munge_demoted(struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) log_print("munge_demoted %x invalid modes gr %d rq %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) lkb->lkb_grmode = DLM_LOCK_NL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) ms->m_type != DLM_MSG_GRANT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) log_print("munge_altmode %x invalid reply type %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) lkb->lkb_id, ms->m_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (lkb->lkb_exflags & DLM_LKF_ALTPR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) lkb->lkb_rqmode = DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) lkb->lkb_rqmode = DLM_LOCK_CW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) dlm_print_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) lkb_statequeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (lkb->lkb_id == first->lkb_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) /* Check if the given lkb conflicts with another lkb on the queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) struct dlm_lkb *this;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) list_for_each_entry(this, head, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (this == lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (!modes_compat(this, lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
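
/* Example with the usual mode compatibility rules (illustrative): if the
 * queue holds a lock granted in PR, a candidate requesting EX conflicts
 * (returns 1) while a candidate requesting PR or CR does not (returns 0).
 * modes_compat() compares the queued lkb's granted mode against the
 * candidate lkb's requested mode. */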
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) * "A conversion deadlock arises with a pair of lock requests in the converting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) * queue for one resource. The granted mode of each lock blocks the requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * mode of the other lock."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) * convert queue from being granted, then deadlk/demote lkb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * Example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) * Granted Queue: empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) * Convert Queue: NL->EX (first lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * PR->EX (second lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) * The first lock can't be granted because of the granted mode of the second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) * lock and the second lock can't be granted because it's not first in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) * flag set and return DEMOTED in the lksb flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * Originally, this function detected conv-deadlk in a more limited scope:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * - if lkb1 was the first entry in the queue (not just earlier), and was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * blocked by the granted mode of lkb2, and there was nothing on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * granted queue preventing lkb1 from being granted immediately, i.e.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * lkb2 was the only thing preventing lkb1 from being granted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) * That second condition meant we'd only say there was conv-deadlk if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) * resolving it (by demotion) would lead to the first lock on the convert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * queue being granted right away. It allowed conversion deadlocks to exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * between locks on the convert queue while they couldn't be granted anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * Now, we detect and take action on conversion deadlocks immediately when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * they're created, even if they may not be immediately consequential. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) * mode that would prevent lkb1's conversion from being granted, we do a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) * I think this means that the lkb_is_ahead condition below should always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) * be zero, i.e. there will never be conv-deadlk between two locks that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) * both already on the convert queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) struct dlm_lkb *lkb1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) int lkb_is_ahead = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (lkb1 == lkb2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) lkb_is_ahead = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (!lkb_is_ahead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (!modes_compat(lkb2, lkb1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (!modes_compat(lkb2, lkb1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) !modes_compat(lkb1, lkb2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
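
/* Walk-through (illustrative): the convert queue holds lkb1 converting
 * NL->EX; lkb2, currently granted PR, now asks to convert PR->EX.  Scanning
 * the queue we reach lkb1 before (or without) reaching lkb2, so
 * lkb_is_ahead == 0, and modes_compat(lkb2, lkb1) fails because lkb2's
 * granted PR blocks lkb1's requested EX.  The function returns 1 and the
 * caller resolves the deadlock by demoting lkb2 (if CONVDEADLK is set) or
 * failing lkb2's conversion with -EDEADLK. */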
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) * Return 1 if the lock can be granted, 0 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * Also detect and resolve conversion deadlocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) * lkb is the lock to be granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) * now is 1 if the function is being called in the context of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) * immediate request, it is 0 if called later, after the lock has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) * queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) * recover is 1 if dlm_recover_grant() is trying to grant conversions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) * after recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) int recover)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) * 6-10: Version 5.4 introduced an option to address the phenomenon of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * a new request for a NL mode lock being blocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * 6-11: If the optional EXPEDITE flag is used with the new NL mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * request, then it would be granted. In essence, the use of this flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) * tells the Lock Manager to expedite this request by not considering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) * what may be in the CONVERTING or WAITING queues... As of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) * writing, the EXPEDITE flag can be used only with new requests for NL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) * mode locks. This flag is not valid for conversion requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) * A shortcut. Earlier checks return an error if EXPEDITE is used in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) * conversion or used with a non-NL requested mode. We also know an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) * EXPEDITE request is always granted immediately, so now must always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) * be 1. The full condition to grant an expedite request: (now &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) * therefore be shortened to just checking the flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) * added to the remaining conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (queue_conflict(&r->res_grantqueue, lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * 6-3: By default, a conversion request is immediately granted if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) * requested mode is compatible with the modes of all other granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) * locks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (queue_conflict(&r->res_convertqueue, lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) * The RECOVER_GRANT flag means dlm_recover_grant() is granting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) * locks for a recovered rsb, on which lkb's have been rebuilt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) * The lkb's may have been rebuilt on the queues in a different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) * order than they were in on the previous master. So, granting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) * queued conversions in order after recovery doesn't make sense
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) * since the order hasn't been preserved anyway. The new order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) * could also have created a new "in place" conversion deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) * After recovery, there would be no granted locks, and possibly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * NL->EX, PR->EX, an in-place conversion deadlock.) So, after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) * recovery, grant conversions without considering order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if (conv && recover)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) * 6-5: But the default algorithm for deciding whether to grant or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) * queue conversion requests does not by itself guarantee that such
	 * requests are serviced on a "first come first served" basis. This, in
	 * turn, can lead to a phenomenon known as "indefinite postponement".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) * 6-7: This issue is dealt with by using the optional QUECVT flag with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * the system service employed to request a lock conversion. This flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) * forces certain conversion requests to be queued, even if they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) * compatible with the granted modes of other locks on the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) * resource. Thus, the use of this flag results in conversion requests
	 * being ordered on a "first come first served" basis.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) * DCT: This condition is all about new conversions being able to occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) * "in place" while the lock remains on the granted queue (assuming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) * doesn't _have_ to go onto the convert queue where it's processed in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) * order. The "now" variable is necessary to distinguish converts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) * being received and processed for the first time now, because once a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) * convert is moved to the conversion queue the condition below applies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) * requiring fifo granting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) * Even if the convert is compat with all granted locks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) * QUECVT forces it behind other locks on the convert queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (list_empty(&r->res_convertqueue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) * The NOORDER flag is set to avoid the standard vms rules on grant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) if (lkb->lkb_exflags & DLM_LKF_NOORDER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) * granted until all other conversion requests ahead of it are granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) * and/or canceled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) * 6-4: By default, a new request is immediately granted only if all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) * three of the following conditions are satisfied when the request is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) * issued:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) * - The queue of ungranted conversion requests for the resource is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) * empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) * - The queue of ungranted new requests for the resource is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) * - The mode of the new request is compatible with the most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) * restrictive mode of all granted locks on the resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (now && !conv && list_empty(&r->res_convertqueue) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) list_empty(&r->res_waitqueue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) * 6-4: Once a lock request is in the queue of ungranted new requests,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) * it cannot be granted until the queue of ungranted conversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) * requests is empty, all ungranted new requests ahead of it are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) * granted and/or canceled, and it is compatible with the granted mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) * of the most restrictive lock granted on the resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (!now && !conv && list_empty(&r->res_convertqueue) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) first_in_list(lkb, &r->res_waitqueue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
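
/*
 * Illustrative sketch, not part of the dlm code paths: roughly how a caller
 * would use the EXPEDITE and QUECVT flags checked above, assuming the usual
 * dlm_lock() kernel API from <linux/dlm.h>.  The example_ names and the
 * resource name are hypothetical.
 */

static inline int example_request_nl_expedite(dlm_lockspace_t *ls,
					      struct dlm_lksb *lksb,
					      void (*ast)(void *arg),
					      void (*bast)(void *arg, int mode),
					      void *arg)
{
	/* a new NL request with EXPEDITE is granted immediately, regardless
	   of any blocked requests (the first check in _can_be_granted) */
	return dlm_lock(ls, DLM_LOCK_NL, lksb, DLM_LKF_EXPEDITE,
			"example-res", 11, 0, ast, arg, bast);
}

static inline int example_convert_quecvt(dlm_lockspace_t *ls,
					 struct dlm_lksb *lksb,
					 void (*ast)(void *arg),
					 void (*bast)(void *arg, int mode),
					 void *arg)
{
	/* called once the NL lock above has been granted; QUECVT forces the
	   conversion to queue behind earlier conversions, giving "first come
	   first served" ordering instead of an in-place grant */
	return dlm_lock(ls, DLM_LOCK_PR, lksb,
			DLM_LKF_CONVERT | DLM_LKF_QUECVT,
			NULL, 0, 0, ast, arg, bast);
}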
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) int recover, int *err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) int8_t alt = 0, rqmode = lkb->lkb_rqmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) *err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) rv = _can_be_granted(r, lkb, now, recover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) * The CONVDEADLK flag is non-standard and tells the dlm to resolve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) * conversion deadlocks by demoting grmode to NL, otherwise the dlm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) * cancels one of the locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if (is_convert && can_be_queued(lkb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) conversion_deadlock_detect(r, lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) lkb->lkb_grmode = DLM_LOCK_NL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) } else if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) *err = -EDEADLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) log_print("can_be_granted deadlock %x now %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) lkb->lkb_id, now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) dlm_dump_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) * to grant a request in a mode other than the normal rqmode. It's a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) * simple way to provide a big optimization to applications that can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) * use them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) alt = DLM_LOCK_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) alt = DLM_LOCK_CW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) if (alt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) lkb->lkb_rqmode = alt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) rv = _can_be_granted(r, lkb, now, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) lkb->lkb_rqmode = rqmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
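
/*
 * Illustrative sketch, not part of the dlm code paths: how a caller might use
 * the non-standard ALTPR/ALTCW flags handled above and detect an alternate
 * grant in its completion ast.  The example_ names are hypothetical.
 */

static inline int example_request_pr_or_cw(dlm_lockspace_t *ls,
					   struct dlm_lksb *lksb,
					   void (*ast)(void *arg),
					   void (*bast)(void *arg, int mode),
					   void *arg)
{
	/* ask for PR, but allow the dlm to grant CW instead if PR would
	   have to wait */
	return dlm_lock(ls, DLM_LOCK_PR, lksb,
			DLM_LKF_NOQUEUE | DLM_LKF_ALTCW,
			"example-res", 11, 0, ast, arg, bast);
}

static inline bool example_granted_altmode(struct dlm_lksb *lksb)
{
	/* in the completion ast, DLM_SBF_ALTMODE in sb_flags means the
	   alternate mode (CW here), not the requested PR, was granted */
	return !lksb->sb_status && (lksb->sb_flags & DLM_SBF_ALTMODE);
}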
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) /* Returns the highest requested mode of all blocked conversions; sets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) cw if there's a blocked conversion to DLM_LOCK_CW. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) unsigned int *count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) struct dlm_lkb *lkb, *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) int recover = rsb_flag(r, RSB_RECOVER_GRANT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) int hi, demoted, quit, grant_restart, demote_restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) int deadlk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) quit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) grant_restart = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) demote_restart = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) hi = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) demoted = is_demoted(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) deadlk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) grant_lock_pending(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) grant_restart = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) (*count)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (!demoted && is_demoted(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) log_print("WARN: pending demoted %x node %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) demote_restart = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) if (deadlk) {
			/*
			 * If the DLM_LKF_NODLCKWT flag is set and conversion
			 * deadlock is detected, queue a blocking AST for the
			 * requested mode and leave the conversion queued; the
			 * lock holder is expected to resolve the deadlock by
			 * demoting or cancelling its own conversion.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if (lkb->lkb_highbast < lkb->lkb_rqmode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) queue_bast(r, lkb, lkb->lkb_rqmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) lkb->lkb_highbast = lkb->lkb_rqmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) log_print("WARN: pending deadlock %x node %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) lkb->lkb_id, lkb->lkb_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) dlm_dump_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) hi = max_t(int, lkb->lkb_rqmode, hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) *cw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) if (grant_restart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) if (demote_restart && !quit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) quit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) return max_t(int, high, hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
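
/*
 * Illustrative sketch, not part of the dlm code paths: the two caller-visible
 * ways of dealing with the conversion deadlocks handled above.  The example_
 * names are hypothetical; the lock named by lksb->sb_lkid is assumed to be
 * granted already.
 */

static inline int example_convert_convdeadlk(dlm_lockspace_t *ls,
					     struct dlm_lksb *lksb,
					     void (*ast)(void *arg),
					     void (*bast)(void *arg, int mode),
					     void *arg)
{
	/* with CONVDEADLK the dlm resolves a conversion deadlock itself by
	   demoting this lock's grmode to NL; DLM_SBF_DEMOTED is then
	   reported in sb_flags */
	return dlm_lock(ls, DLM_LOCK_EX, lksb,
			DLM_LKF_CONVERT | DLM_LKF_CONVDEADLK,
			NULL, 0, 0, ast, arg, bast);
}

static inline int example_convert_nodlckwt(dlm_lockspace_t *ls,
					   struct dlm_lksb *lksb,
					   void (*ast)(void *arg),
					   void (*bast)(void *arg, int mode),
					   void *arg)
{
	/* with NODLCKWT the dlm does not resolve the deadlock; the caller is
	   expected to react to the blocking ast queued above by demoting or
	   cancelling its own conversion */
	return dlm_lock(ls, DLM_LOCK_EX, lksb,
			DLM_LKF_CONVERT | DLM_LKF_NODLCKWT,
			NULL, 0, 0, ast, arg, bast);
}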
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) unsigned int *count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) struct dlm_lkb *lkb, *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (can_be_granted(r, lkb, 0, 0, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) grant_lock_pending(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) (*count)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) high = max_t(int, lkb->lkb_rqmode, high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) if (lkb->lkb_rqmode == DLM_LOCK_CW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) *cw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) return high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) on either the convert or waiting queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) high is the largest rqmode of all locks blocked on the convert or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) waiting queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) if (gr->lkb_highbast < DLM_LOCK_EX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (gr->lkb_highbast < high &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) }
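
/*
 * Worked example for the PR/CW special case above: DLM_LOCK_CW (2) sorts
 * below DLM_LOCK_PR (3), so a blocked CW request never raises "high" past
 * PR even though CW and PR are incompatible.  Without the cw flag a granted
 * PR lock would be skipped, since __dlm_compat_matrix says PR is compatible
 * with PR; with cw set, the PR holder still gets a blocking ast, queued for
 * DLM_LOCK_CW by grant_pending_locks() below.
 */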
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) struct dlm_lkb *lkb, *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) int high = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) int cw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) if (!is_master(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) dlm_dump_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) high = grant_pending_convert(r, high, &cw, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) high = grant_pending_wait(r, high, &cw, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) if (high == DLM_LOCK_IV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) * If there are locks left on the wait/convert queue then send blocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) * ASTs to granted locks based on the largest requested mode (high)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) * found above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if (cw && high == DLM_LOCK_PR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) lkb->lkb_grmode == DLM_LOCK_PR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) queue_bast(r, lkb, DLM_LOCK_CW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) queue_bast(r, lkb, high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) lkb->lkb_highbast = high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (gr->lkb_highbast < DLM_LOCK_EX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) struct dlm_lkb *gr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) list_for_each_entry(gr, head, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) /* skip self when sending basts to convertqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) if (gr == lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) queue_bast(r, gr, lkb->lkb_rqmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) gr->lkb_highbast = lkb->lkb_rqmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) send_bast_queue(r, &r->res_grantqueue, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) send_bast_queue(r, &r->res_grantqueue, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) send_bast_queue(r, &r->res_convertqueue, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) /* set_master(r, lkb) -- set the master nodeid of a resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) The purpose of this function is to set the nodeid field in the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) lkb using the nodeid field in the given rsb. If the rsb's nodeid is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) known, it can just be copied to the lkb and the function will return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 0. If the rsb's nodeid is _not_ known, it needs to be looked up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) before it can be copied to the lkb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) When the rsb nodeid is being looked up remotely, the initial lkb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) causing the lookup is kept on the ls_waiters list waiting for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) lookup reply. Other lkb's waiting for the same rsb lookup are kept
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) on the rsb's res_lookup list until the master is verified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 1: the rsb master is not available and the lkb has been placed on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) a wait queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) int our_nodeid = dlm_our_nodeid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) r->res_first_lkid = lkb->lkb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) lkb->lkb_nodeid = r->res_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) if (r->res_master_nodeid == our_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) lkb->lkb_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) if (r->res_master_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) lkb->lkb_nodeid = r->res_master_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) if (dlm_dir_nodeid(r) == our_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) /* This is a somewhat unusual case; find_rsb will usually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) have set res_master_nodeid when dir nodeid is local, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) there are cases where we become the dir node after we've
		   passed find_rsb and then go through _request_lock again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) confirm_master() or process_lookup_list() needs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) called after this. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) r->res_master_nodeid = our_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) r->res_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) lkb->lkb_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) wait_pending_remove(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) r->res_first_lkid = lkb->lkb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) send_lookup(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) static void process_lookup_list(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) struct dlm_lkb *lkb, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) list_del_init(&lkb->lkb_rsb_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) _request_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) /* confirm_master -- confirm (or deny) an rsb's master nodeid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) static void confirm_master(struct dlm_rsb *r, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) if (!r->res_first_lkid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) switch (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) case -EINPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) r->res_first_lkid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) process_lookup_list(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) case -EBADR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) case -ENOTBLK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) /* the remote request failed and won't be retried (it was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) a NOQUEUE, or has been canceled/unlocked); make a waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) lkb the first_lkid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) r->res_first_lkid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) if (!list_empty(&r->res_lookup)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) lkb_rsb_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) list_del_init(&lkb->lkb_rsb_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) r->res_first_lkid = lkb->lkb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) _request_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) log_error(r->res_ls, "confirm_master unknown error %d", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) int namelen, unsigned long timeout_cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) void (*ast) (void *astparam),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) void *astparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) void (*bast) (void *astparam, int mode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) struct dlm_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) int rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) /* check for invalid arg usage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) if (mode < 0 || mode > DLM_LOCK_EX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) if (flags & DLM_LKF_CANCEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) if (!ast || !lksb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
	/* these args will be copied to the lkb in validate_lock_args;
	   this cannot be done now because, when converting locks, fields in
	   an active lkb cannot be modified before locking the rsb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) args->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) args->astfn = ast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) args->astparam = astparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) args->bastfn = bast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) args->timeout = timeout_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) args->mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) args->lksb = lksb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) }
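
/*
 * Illustrative sketch, not part of the dlm code paths: a request that passes
 * the argument checks above.  Combinations such as EXPEDITE with CONVERT,
 * EXPEDITE with a mode other than NL, or VALBLK without lksb->sb_lvbptr are
 * refused with -EINVAL before an lkb is touched.  The example_ names are
 * hypothetical.
 */

static inline int example_lock_args(dlm_lockspace_t *ls,
				    struct dlm_lksb *lksb, char *example_lvb,
				    void (*ast)(void *arg), void *arg)
{
	/* a new CR request carrying a lock value block; VALBLK requires
	   sb_lvbptr to be set, while the bast function is optional */
	lksb->sb_lvbptr = example_lvb;
	return dlm_lock(ls, DLM_LOCK_CR, lksb, DLM_LKF_VALBLK,
			"example-res", 11, 0, ast, arg, NULL);
}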
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) DLM_LKF_FORCEUNLOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) args->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) args->astparam = astarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) struct dlm_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) int rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) if (args->flags & DLM_LKF_CONVERT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) if (lkb->lkb_flags & DLM_IFL_MSTCPY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) if (args->flags & DLM_LKF_QUECVT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (lkb->lkb_status != DLM_LKSTS_GRANTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) if (lkb->lkb_wait_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) if (is_overlap(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) lkb->lkb_exflags = args->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) lkb->lkb_sbflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) lkb->lkb_astfn = args->astfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) lkb->lkb_astparam = args->astparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) lkb->lkb_bastfn = args->bastfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) lkb->lkb_rqmode = args->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) lkb->lkb_lksb = args->lksb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) lkb->lkb_ownpid = (int) current->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) lkb->lkb_timeout_cs = args->timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) lkb->lkb_status, lkb->lkb_wait_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) lkb->lkb_resource->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) }
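
/*
 * Illustrative sketch, not part of the dlm code paths: the caller-side shape
 * of a conversion that validate_lock_args() accepts.  The lock named by
 * lksb->sb_lkid must currently be granted with no other operation in flight,
 * otherwise the convert fails with -EBUSY; a QUECVT conversion must also be
 * to a higher mode per __quecvt_compat_matrix.  The example_ name is
 * hypothetical.
 */

static inline int example_convert_to_ex(dlm_lockspace_t *ls,
					struct dlm_lksb *lksb,
					void (*ast)(void *arg),
					void (*bast)(void *arg, int mode),
					void *arg)
{
	/* convert the already granted lock identified by sb_lkid up to EX;
	   the resource name and length are not needed for conversions */
	return dlm_lock(ls, DLM_LOCK_EX, lksb, DLM_LKF_CONVERT,
			NULL, 0, 0, ast, arg, bast);
}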
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) for success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) because there may be a lookup in progress and it's valid to do
   cancel/force-unlock on it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) struct dlm_ls *ls = lkb->lkb_resource->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) int rv = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) dlm_print_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) /* an lkb may still exist even though the lock is EOL'ed due to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) cancel, unlock or failed noqueue request; an app can't use these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) locks; return same error as if the lkid had not been found at all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) rv = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) /* an lkb may be waiting for an rsb lookup to complete where the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) lookup was initiated by another lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) if (!list_empty(&lkb->lkb_rsb_lookup)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) list_del_init(&lkb->lkb_rsb_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) queue_cast(lkb->lkb_resource, lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) args->flags & DLM_LKF_CANCEL ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) -DLM_ECANCEL : -DLM_EUNLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) unhold_lkb(lkb); /* undoes create_lkb() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) /* cancel not allowed with another cancel/unlock in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) if (args->flags & DLM_LKF_CANCEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) if (lkb->lkb_exflags & DLM_LKF_CANCEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) if (is_overlap(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) /* don't let scand try to do a cancel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) del_timeout(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) if (lkb->lkb_flags & DLM_IFL_RESEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) /* there's nothing to cancel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) !lkb->lkb_wait_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) switch (lkb->lkb_wait_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) case DLM_MSG_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) case DLM_MSG_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) case DLM_MSG_UNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) case DLM_MSG_CANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) /* add_to_waiters() will set OVERLAP_CANCEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) goto out_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) /* do we need to allow a force-unlock if there's a normal unlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) already in progress? in what conditions could the normal unlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) fail such that we'd want to send a force-unlock to be sure? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) if (args->flags & DLM_LKF_FORCEUNLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) if (is_overlap_unlock(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) /* don't let scand try to do a cancel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) del_timeout(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) if (lkb->lkb_flags & DLM_IFL_RESEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) switch (lkb->lkb_wait_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) case DLM_MSG_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) case DLM_MSG_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) case DLM_MSG_UNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) /* add_to_waiters() will set OVERLAP_UNLOCK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) goto out_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) /* normal unlock not allowed if there's any op in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) rv = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) if (lkb->lkb_wait_type || lkb->lkb_wait_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) out_ok:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) /* an overlapping op shouldn't blow away exflags from other op */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) lkb->lkb_exflags |= args->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) lkb->lkb_sbflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) lkb->lkb_astparam = args->astparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) args->flags, lkb->lkb_wait_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) lkb->lkb_resource->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) }
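
/*
 * Illustrative sketch, not part of the dlm code paths: cancelling a blocked
 * request.  As noted above, dlm_unlock() turns the -EBUSY cases for CANCEL
 * and FORCEUNLOCK into 0, so the real outcome typically arrives in the
 * completion ast: sb_status of -DLM_ECANCEL if the cancel won the race,
 * 0 if the lock was granted first.  The example_ name is hypothetical.
 */

static inline int example_cancel_request(dlm_lockspace_t *ls,
					 struct dlm_lksb *lksb, void *arg)
{
	return dlm_unlock(ls, lksb->sb_lkid, DLM_LKF_CANCEL, lksb, arg);
}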
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) * Four stage 4 varieties:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) * do_request(), do_convert(), do_unlock(), do_cancel()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) * These are called on the master node for the given lock and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) * from the central locking logic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) if (can_be_granted(r, lkb, 1, 0, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) grant_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) queue_cast(r, lkb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) if (can_be_queued(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) error = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) add_lkb(r, lkb, DLM_LKSTS_WAITING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) add_timeout(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) queue_cast(r, lkb, -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) switch (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) if (force_blocking_asts(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) send_blocking_asts_all(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) case -EINPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) send_blocking_asts(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) int deadlk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) /* changing an existing lock may allow others to be granted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) grant_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) queue_cast(r, lkb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) /* can_be_granted() detected that this lock would block in a conversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) deadlock, so we leave it on the granted queue and return EDEADLK in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) the ast for the convert. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) /* it's left on the granted queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) revert_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) queue_cast(r, lkb, -EDEADLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) error = -EDEADLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) /* is_demoted() means the can_be_granted() above set the grmode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) to NL, and left us on the granted queue. This auto-demotion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) (due to CONVDEADLK) might mean other locks, and/or this lock, are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) now grantable. We have to try to grant other converting locks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) before we try again to grant this one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) if (is_demoted(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) if (_can_be_granted(r, lkb, 1, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) grant_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) queue_cast(r, lkb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) /* else fall through and move to convert queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) if (can_be_queued(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) error = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) del_lkb(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) add_lkb(r, lkb, DLM_LKSTS_CONVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) add_timeout(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) queue_cast(r, lkb, -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) switch (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) grant_pending_locks(r, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) /* grant_pending_locks also sends basts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) if (force_blocking_asts(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) send_blocking_asts_all(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) case -EINPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) send_blocking_asts(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
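/* returns: -DLM_EUNLOCK; lock removed, unlock ast queued */
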
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) remove_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) queue_cast(r, lkb, -DLM_EUNLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) return -DLM_EUNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) grant_pending_locks(r, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) error = revert_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) queue_cast(r, lkb, -DLM_ECANCEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) return -DLM_ECANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) grant_pending_locks(r, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) * Four stage 3 varieties:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) /* add a new lkb to a possibly new rsb, called by requesting process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) /* set_master: sets lkb nodeid from r */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) error = set_master(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) if (is_remote(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) /* receive_request() calls do_request() on remote node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) error = send_request(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) error = do_request(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) /* for remote locks the request_reply is sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) between do_request and do_request_effects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) do_request_effects(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) /* change some property of an existing lkb, e.g. mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) if (is_remote(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) /* receive_convert() calls do_convert() on remote node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) error = send_convert(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) error = do_convert(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) /* for remote locks the convert_reply is sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) between do_convert and do_convert_effects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) do_convert_effects(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) /* remove an existing lkb from the granted queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) if (is_remote(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) /* receive_unlock() calls do_unlock() on remote node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) error = send_unlock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) error = do_unlock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) /* for remote locks the unlock_reply is sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) between do_unlock and do_unlock_effects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) do_unlock_effects(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) /* remove an existing lkb from the convert or wait queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) if (is_remote(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) /* receive_cancel() calls do_cancel() on remote node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) error = send_cancel(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) error = do_cancel(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) /* for remote locks the cancel_reply is sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) between do_cancel and do_cancel_effects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) do_cancel_effects(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) * Four stage 2 varieties:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) int len, struct dlm_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) error = validate_lock_args(ls, lkb, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) attach_lkb(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) error = _request_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) struct dlm_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) error = validate_lock_args(ls, lkb, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) error = _convert_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) struct dlm_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) error = validate_unlock_args(lkb, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) error = _unlock_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) struct dlm_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) error = validate_unlock_args(lkb, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) error = _cancel_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) * Two stage 1 varieties: dlm_lock() and dlm_unlock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) int dlm_lock(dlm_lockspace_t *lockspace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) struct dlm_lksb *lksb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) uint32_t flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) void *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) unsigned int namelen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) uint32_t parent_lkid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) void (*ast) (void *astarg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) void *astarg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) void (*bast) (void *astarg, int mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) struct dlm_ls *ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) struct dlm_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) int error, convert = flags & DLM_LKF_CONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) ls = dlm_find_lockspace_local(lockspace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) if (!ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) dlm_lock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) if (convert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) error = find_lkb(ls, lksb->sb_lkid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) error = create_lkb(ls, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) astarg, bast, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) if (convert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) error = convert_lock(ls, lkb, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) error = request_lock(ls, lkb, name, namelen, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)
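	/* -EINPROGRESS means the lock was queued and the final result will be
	   delivered in the completion ast, so the caller sees success now;
	   -EAGAIN and -EDEADLK are likewise delivered through the ast, which
	   is why they are cleared below. */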
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) if (error == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) if (convert || error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) __put_lkb(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) if (error == -EAGAIN || error == -EDEADLK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) dlm_unlock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) dlm_put_lockspace(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) int dlm_unlock(dlm_lockspace_t *lockspace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) uint32_t lkid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) uint32_t flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) struct dlm_lksb *lksb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) void *astarg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) struct dlm_ls *ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) struct dlm_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) ls = dlm_find_lockspace_local(lockspace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) if (!ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) dlm_lock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) error = find_lkb(ls, lkid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) error = set_unlock_args(flags, astarg, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) if (flags & DLM_LKF_CANCEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) error = cancel_lock(ls, lkb, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) error = unlock_lock(ls, lkb, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) dlm_unlock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) dlm_put_lockspace(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) }
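
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a kernel user typically drives dlm_lock()/dlm_unlock() asynchronously and
 * waits on a completion signalled from the lock ast.  The names my_ast,
 * my_done and "my_resource" are made up for the example.
 *
 *	static void my_ast(void *arg)
 *	{
 *		complete((struct completion *)arg);
 *	}
 *
 *	struct dlm_lksb lksb = {};
 *	DECLARE_COMPLETION_ONSTACK(my_done);
 *	int error;
 *
 *	error = dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "my_resource",
 *			 strlen("my_resource"), 0, my_ast, &my_done, NULL);
 *	if (!error) {
 *		wait_for_completion(&my_done);
 *		if (!lksb.sb_status) {
 *			... critical section, lock id is lksb.sb_lkid ...
 *			dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, &my_done);
 *			wait_for_completion(&my_done);
 *		}
 *	}
 */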
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) * send/receive routines for remote operations and replies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) * send_args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) * send_common
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) * send_request receive_request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) * send_convert receive_convert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) * send_unlock receive_unlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) * send_cancel receive_cancel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) * send_grant receive_grant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) * send_bast receive_bast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) * send_lookup receive_lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) * send_remove receive_remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) * send_common_reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) * receive_request_reply send_request_reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) * receive_convert_reply send_convert_reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) * receive_unlock_reply send_unlock_reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) * receive_cancel_reply send_cancel_reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) * receive_lookup_reply send_lookup_reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) static int _create_message(struct dlm_ls *ls, int mb_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) int to_nodeid, int mstype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) struct dlm_message **ms_ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) struct dlm_mhandle **mh_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) struct dlm_message *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) struct dlm_mhandle *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) char *mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
	/* dlm_lowcomms_get_buffer() gives us a message handle (mh) that we
	   later pass to dlm_lowcomms_commit_buffer(), and a message buffer
	   (mb) that we write our data into */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) if (!mh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) memset(mb, 0, mb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) ms = (struct dlm_message *) mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) ms->m_header.h_lockspace = ls->ls_global_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) ms->m_header.h_nodeid = dlm_our_nodeid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) ms->m_header.h_length = mb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) ms->m_header.h_cmd = DLM_MSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) ms->m_type = mstype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) *mh_ret = mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) *ms_ret = ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) int to_nodeid, int mstype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) struct dlm_message **ms_ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) struct dlm_mhandle **mh_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) int mb_len = sizeof(struct dlm_message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567)
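	/* request/lookup/remove messages append the resource name after the
	   fixed-size struct; convert/unlock/grant and the request/convert
	   replies may append the lock value block instead */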
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) switch (mstype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) case DLM_MSG_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) case DLM_MSG_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) case DLM_MSG_REMOVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) mb_len += r->res_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) case DLM_MSG_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) case DLM_MSG_UNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) case DLM_MSG_REQUEST_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) case DLM_MSG_CONVERT_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) case DLM_MSG_GRANT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) if (lkb && lkb->lkb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) mb_len += r->res_ls->ls_lvblen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) ms_ret, mh_ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) /* further lowcomms enhancements or alternate implementations may make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) the return value from this function useful at some point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) dlm_message_out(ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) dlm_lowcomms_commit_buffer(mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) ms->m_nodeid = lkb->lkb_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) ms->m_pid = lkb->lkb_ownpid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) ms->m_lkid = lkb->lkb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) ms->m_remid = lkb->lkb_remid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) ms->m_exflags = lkb->lkb_exflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) ms->m_sbflags = lkb->lkb_sbflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) ms->m_flags = lkb->lkb_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) ms->m_lvbseq = lkb->lkb_lvbseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) ms->m_status = lkb->lkb_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) ms->m_grmode = lkb->lkb_grmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) ms->m_rqmode = lkb->lkb_rqmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) ms->m_hash = r->res_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) /* m_result and m_bastmode are set from function args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) not from lkb fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) if (lkb->lkb_bastfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) ms->m_asts |= DLM_CB_BAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) if (lkb->lkb_astfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) ms->m_asts |= DLM_CB_CAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) /* compare with switch in create_message; send_remove() doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) use send_args() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) switch (ms->m_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) case DLM_MSG_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) case DLM_MSG_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) memcpy(ms->m_extra, r->res_name, r->res_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) case DLM_MSG_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) case DLM_MSG_UNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) case DLM_MSG_REQUEST_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) case DLM_MSG_CONVERT_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) case DLM_MSG_GRANT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) if (!lkb->lkb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) struct dlm_message *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) struct dlm_mhandle *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) int to_nodeid, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) to_nodeid = r->res_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) error = add_to_waiters(lkb, mstype, to_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) send_args(r, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) error = send_message(mh, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) remove_from_waiters(lkb, msg_reply_type(mstype));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) return send_common(r, lkb, DLM_MSG_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) error = send_common(r, lkb, DLM_MSG_CONVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) /* down conversions go without a reply from the master */
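	/* a down-conversion is always grantable, so the master sends no
	   convert_reply; drop the lkb from the waiters list and feed
	   __receive_convert_reply() a local stub message with a zero result */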
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) if (!error && down_conversion(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) r->res_ls->ls_stub_ms.m_result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) /* FIXME: if this lkb is the only lock we hold on the rsb, then set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) MASTER_UNCERTAIN to force the next request on the rsb to confirm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) that the master is still correct. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) return send_common(r, lkb, DLM_MSG_UNLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) return send_common(r, lkb, DLM_MSG_CANCEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) struct dlm_message *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) struct dlm_mhandle *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) int to_nodeid, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) to_nodeid = lkb->lkb_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) send_args(r, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) ms->m_result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) error = send_message(mh, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) struct dlm_message *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) struct dlm_mhandle *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) int to_nodeid, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) to_nodeid = lkb->lkb_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) send_args(r, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) ms->m_bastmode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) error = send_message(mh, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) struct dlm_message *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) struct dlm_mhandle *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) int to_nodeid, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) to_nodeid = dlm_dir_nodeid(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) send_args(r, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) error = send_message(mh, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) static int send_remove(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) struct dlm_message *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) struct dlm_mhandle *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) int to_nodeid, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) to_nodeid = dlm_dir_nodeid(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) memcpy(ms->m_extra, r->res_name, r->res_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) ms->m_hash = r->res_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) error = send_message(mh, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) int mstype, int rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) struct dlm_message *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) struct dlm_mhandle *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) int to_nodeid, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) to_nodeid = lkb->lkb_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) send_args(r, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) ms->m_result = rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) error = send_message(mh, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) int ret_nodeid, int rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) struct dlm_rsb *r = &ls->ls_stub_rsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) struct dlm_message *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) struct dlm_mhandle *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) int error, nodeid = ms_in->m_header.h_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) ms->m_lkid = ms_in->m_lkid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) ms->m_result = rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) ms->m_nodeid = ret_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) error = send_message(mh, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) /* which args we save from a received message depends heavily on the type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) of message, unlike the send side where we can safely send everything about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) the lkb for any type of message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863)
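/* only the lower 16 bits of lkb_flags are carried in m_flags; the upper 16
   bits are node-local state and are preserved on receive */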
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) lkb->lkb_exflags = ms->m_exflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) lkb->lkb_sbflags = ms->m_sbflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) (ms->m_flags & 0x0000FFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) if (ms->m_flags == DLM_IFL_STUB_MS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) lkb->lkb_sbflags = ms->m_sbflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) (ms->m_flags & 0x0000FFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) static int receive_extralen(struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) return (ms->m_header.h_length - sizeof(struct dlm_message));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) if (!lkb->lkb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) if (!lkb->lkb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) len = receive_extralen(ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) if (len > ls->ls_lvblen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) len = ls->ls_lvblen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904)
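/* a master-copy lkb only needs to record whether the remote owner registered
   ast/bast callbacks (see the m_asts bits in send_args() and
   receive_request_args()); these placeholders are never actually called */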
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) static void fake_bastfn(void *astparam, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) log_print("fake_bastfn should not be called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) static void fake_astfn(void *astparam)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) log_print("fake_astfn should not be called");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) lkb->lkb_nodeid = ms->m_header.h_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) lkb->lkb_ownpid = ms->m_pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) lkb->lkb_remid = ms->m_lkid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) lkb->lkb_grmode = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) lkb->lkb_rqmode = ms->m_rqmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) /* lkb was just created so there won't be an lvb yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) if (!lkb->lkb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) if (lkb->lkb_status != DLM_LKSTS_GRANTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) if (receive_lvb(ls, lkb, ms))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) lkb->lkb_rqmode = ms->m_rqmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) lkb->lkb_lvbseq = ms->m_lvbseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) if (receive_lvb(ls, lkb, ms))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) uses to send a reply and that the remote end uses to process the reply. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) struct dlm_lkb *lkb = &ls->ls_stub_lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) lkb->lkb_nodeid = ms->m_header.h_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) lkb->lkb_remid = ms->m_lkid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) /* This is called after the rsb is locked so that we can safely inspect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) fields in the lkb. */
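
/* convert/unlock/cancel must arrive at a master-copy lkb from the node that
   owns the lock; replies, grants and basts must arrive at a process-copy lkb
   from the master recorded in lkb_nodeid (a request_reply is also accepted
   while the master is still unknown).  Anything else is logged and ignored. */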
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) int from = ms->m_header.h_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977)
	/* currently, mixing user and kernel locks is not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) log_error(lkb->lkb_resource->res_ls,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) "got user dlm message for a kernel lock");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) switch (ms->m_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) case DLM_MSG_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) case DLM_MSG_UNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) case DLM_MSG_CANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) case DLM_MSG_CONVERT_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) case DLM_MSG_UNLOCK_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) case DLM_MSG_CANCEL_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) case DLM_MSG_GRANT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) case DLM_MSG_BAST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) case DLM_MSG_REQUEST_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) if (!is_process_copy(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) log_error(lkb->lkb_resource->res_ls,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) "ignore invalid message %d from %d %x %x %x %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) lkb->lkb_flags, lkb->lkb_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022)
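/* Resend a directory remove for a resource name we no longer have.
   Called from the receive_request() failure path when a request arrives
   for an rsb we know nothing about (-EBADR), which suggests the dir node
   never saw, or has not yet processed, our earlier send_remove().  The
   name is published in ls_remove_name/ls_remove_len around building the
   message, mirroring what the toss/shrink path does (hence the
   remove_name2 question below). */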
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) char name[DLM_RESNAME_MAXLEN + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) struct dlm_message *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) struct dlm_mhandle *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) uint32_t hash, b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) int rv, dir_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) memset(name, 0, sizeof(name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) memcpy(name, ms_name, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) hash = jhash(name, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) b = hash & (ls->ls_rsbtbl_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) dir_nodeid = dlm_hash2nodeid(ls, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) spin_lock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) if (!rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) log_error(ls, "repeat_remove on keep %s", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) if (!rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) log_error(ls, "repeat_remove on toss %s", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) /* use ls->remove_name2 to avoid conflict with shrink? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) spin_lock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) ls->ls_remove_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) spin_unlock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) rv = _create_message(ls, sizeof(struct dlm_message) + len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) memcpy(ms->m_extra, name, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) ms->m_hash = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) send_message(mh, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) spin_lock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) ls->ls_remove_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) spin_unlock(&ls->ls_remove_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080)
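/* A request arriving at the (presumed) master node: create a master-copy
   lkb, find or recreate the rsb named in the message, attach the lkb and
   run do_request(), then return the result with send_request_reply(). */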
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) int from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) int error, namelen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) from_nodeid = ms->m_header.h_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) error = create_lkb(ls, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) receive_flags(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) lkb->lkb_flags |= DLM_IFL_MSTCPY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) error = receive_request_args(ls, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) __put_lkb(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) /* The dir node is the authority on whether we are the master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) for this rsb or not, so if the dir node sends us a request, we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) recreate the rsb if we've destroyed it. This race happens when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) send a remove message to the dir node at the same time that the dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) node sends us a request for the rsb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) namelen = receive_extralen(ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) R_RECEIVE_REQUEST, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) __put_lkb(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) if (r->res_master_nodeid != dlm_our_nodeid()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) error = validate_master_nodeid(ls, r, from_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) __put_lkb(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) attach_lkb(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) error = do_request(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) send_request_reply(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) do_request_effects(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) if (error == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) and do this receive_request again from process_lookup_list once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) we get the lookup reply. This would avoid many repeated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) ENOTBLK request failures when the lookup reply designating us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) as master is delayed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) /* We could repeatedly return -EBADR here if our send_remove() is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) delayed in being sent/arriving/being processed on the dir node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) Another node would repeatedly look up the master, and the dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) node would continue returning our nodeid until our send_remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) took effect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) We send another remove message in case our previous send_remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) was lost/ignored/missed somehow. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) if (error != -ENOTBLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) log_limit(ls, "receive_request %x from %d %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) ms->m_lkid, from_nodeid, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) if (namelen && error == -EBADR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) send_repeat_remove(ls, ms->m_extra, namelen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) setup_stub_lkb(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) int error, reply = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) error = find_lkb(ls, ms->m_remid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183)
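	/* an lkb whose remote id does not match the sender's lkid is taken
	   to be a stale message, e.g. one left over from an earlier
	   incarnation of the lock before recovery (note recover_seq in the
	   log below) */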
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) if (lkb->lkb_remid != ms->m_lkid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) log_error(ls, "receive_convert %x remid %x recover_seq %llu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) (unsigned long long)lkb->lkb_recover_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) ms->m_header.h_nodeid, ms->m_lkid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) error = validate_message(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) receive_flags(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) error = receive_convert_args(ls, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) send_convert_reply(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210)
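	/* no reply is sent for a down-conversion: do_convert() grants it
	   immediately, and the converting node has already removed itself
	   from the waiters list for it (see send_convert()) */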
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) reply = !down_conversion(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) error = do_convert(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) if (reply)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) send_convert_reply(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) do_convert_effects(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) setup_stub_lkb(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) error = find_lkb(ls, ms->m_remid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) if (lkb->lkb_remid != ms->m_lkid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) log_error(ls, "receive_unlock %x remid %x remote %d %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) lkb->lkb_id, lkb->lkb_remid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) ms->m_header.h_nodeid, ms->m_lkid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) error = validate_message(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) receive_flags(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) error = receive_unlock_args(ls, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) send_unlock_reply(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) error = do_unlock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) send_unlock_reply(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) do_unlock_effects(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) setup_stub_lkb(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) error = find_lkb(ls, ms->m_remid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) receive_flags(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) error = validate_message(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) error = do_cancel(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) send_cancel_reply(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) do_cancel_effects(r, lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) setup_stub_lkb(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) error = find_lkb(ls, ms->m_remid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) error = validate_message(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) receive_flags_reply(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) if (is_altmode(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) munge_altmode(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) grant_lock_pc(r, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) queue_cast(r, lkb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) error = find_lkb(ls, ms->m_remid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) error = validate_message(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) queue_bast(r, lkb, ms->m_bastmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) lkb->lkb_highbast = ms->m_bastmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) int len, error, ret_nodeid, from_nodeid, our_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) from_nodeid = ms->m_header.h_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) our_nodeid = dlm_our_nodeid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) len = receive_extralen(ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) &ret_nodeid, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) /* Optimization: we're master so treat lookup as a request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) if (!error && ret_nodeid == our_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) receive_request(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) send_lookup_reply(ls, ms, ret_nodeid, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) char name[DLM_RESNAME_MAXLEN+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) uint32_t hash, b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) int rv, len, dir_nodeid, from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) from_nodeid = ms->m_header.h_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) len = receive_extralen(ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) if (len > DLM_RESNAME_MAXLEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) log_error(ls, "receive_remove from %d bad len %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) from_nodeid, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) if (dir_nodeid != dlm_our_nodeid()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) log_error(ls, "receive_remove from %d bad nodeid %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) from_nodeid, dir_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) /* Look for name on rsbtbl.toss; if it's there, kill it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) If it's on rsbtbl.keep, it's being used, and we should ignore this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) message. This is an expected race between the dir node sending a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) request to the master node at the same time as the master node sends
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) a remove to the dir node. The resolution to that race is for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) dir node to ignore the remove message, and the master node to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) recreate the master rsb when it gets a request from the dir node for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) an rsb it doesn't have. */
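	/* Seen from each side, the race looks like:

	      dir node                          master node
	      --------                          -----------
	      send request for name    ---->
	                               <----    send remove for name
	      receive_remove(): name is         receive_request(): rsb was
	      on the keep list, ignore it       tossed, recreate it
	*/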
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) memset(name, 0, sizeof(name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) memcpy(name, ms->m_extra, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) hash = jhash(name, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) b = hash & (ls->ls_rsbtbl_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) spin_lock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) /* verify the rsb is on keep list per comment above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) /* should not happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) log_error(ls, "receive_remove from %d not found %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) from_nodeid, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) if (r->res_master_nodeid != from_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) /* should not happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) log_error(ls, "receive_remove keep from %d master %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) from_nodeid, r->res_master_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) dlm_print_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) log_debug(ls, "receive_remove from %d master %d first %x %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) from_nodeid, r->res_master_nodeid, r->res_first_lkid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) if (r->res_master_nodeid != from_nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) log_error(ls, "receive_remove toss from %d master %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) from_nodeid, r->res_master_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) dlm_print_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470)
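	/* an rsb on the toss list should be holding its final reference,
	   so kref_put() is expected to free it here; if it does not, the
	   reference counting has gone wrong */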
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) if (kref_put(&r->res_ref, kill_rsb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) dlm_free_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) log_error(ls, "receive_remove from %d rsb ref error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) from_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) dlm_print_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) spin_unlock(&ls->ls_rsbtbl[b].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) do_purge(ls, ms->m_nodeid, ms->m_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) int error, mstype, result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) int from_nodeid = ms->m_header.h_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) error = find_lkb(ls, ms->m_remid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) error = validate_message(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) mstype = lkb->lkb_wait_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) log_error(ls, "receive_request_reply %x remote %d %x result %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) dlm_dump_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) /* Optimization: the dir node was also the master, so it took our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) lookup as a request and sent request reply instead of lookup reply */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) if (mstype == DLM_MSG_LOOKUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) r->res_master_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) r->res_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) lkb->lkb_nodeid = from_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) /* this is the value returned from do_request() on the master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) result = ms->m_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) switch (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) /* request would block (be queued) on remote master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) queue_cast(r, lkb, -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) confirm_master(r, -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) unhold_lkb(lkb); /* undoes create_lkb() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) case -EINPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) /* request was queued or granted on remote master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) receive_flags_reply(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) lkb->lkb_remid = ms->m_lkid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) if (is_altmode(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) munge_altmode(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) add_lkb(r, lkb, DLM_LKSTS_WAITING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) add_timeout(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) grant_lock_pc(r, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) queue_cast(r, lkb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) confirm_master(r, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) case -EBADR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) case -ENOTBLK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) /* find_rsb failed to find rsb or rsb wasn't master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) log_limit(ls, "receive_request_reply %x from %d %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) "master %d dir %d first %x %s", lkb->lkb_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) from_nodeid, result, r->res_master_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) r->res_dir_nodeid, r->res_first_lkid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) if (r->res_dir_nodeid != dlm_our_nodeid() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) r->res_master_nodeid != dlm_our_nodeid()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) /* cause _request_lock->set_master->send_lookup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) r->res_master_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) r->res_nodeid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) lkb->lkb_nodeid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) if (is_overlap(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) /* we'll ignore error in cancel/unlock reply */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) queue_cast_overlap(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) confirm_master(r, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) unhold_lkb(lkb); /* undoes create_lkb() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) _request_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) if (r->res_master_nodeid == dlm_our_nodeid())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) confirm_master(r, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) log_error(ls, "receive_request_reply %x error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) lkb->lkb_id, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585)
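	/* An overlapping unlock or cancel may have been requested while we
	   were waiting for this reply.  Now that the master has created its
	   copy, follow up: send_unlock() if an unlock overlapped a granted
	   or queued request, send_cancel() if a cancel overlapped a queued
	   one; otherwise just clear the overlap flags. */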
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) log_debug(ls, "receive_request_reply %x result %d unlock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) lkb->lkb_id, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) send_unlock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) send_cancel(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) /* this is the value returned from do_convert() on the master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) switch (ms->m_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) /* convert would block (be queued) on remote master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) queue_cast(r, lkb, -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) case -EDEADLK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) receive_flags_reply(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) revert_lock_pc(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) queue_cast(r, lkb, -EDEADLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) case -EINPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) /* convert was queued on remote master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) receive_flags_reply(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) if (is_demoted(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) munge_demoted(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) del_lkb(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) add_lkb(r, lkb, DLM_LKSTS_CONVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) add_timeout(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) /* convert was granted on remote master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) receive_flags_reply(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) if (is_demoted(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) munge_demoted(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) grant_lock_pc(r, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) queue_cast(r, lkb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) ms->m_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) dlm_print_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) dlm_print_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) struct dlm_rsb *r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) error = validate_message(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) /* stub reply can happen with waiters_mutex held */
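	/* (a stub reply is one faked locally by recovery, which already
	   holds waiters_mutex, so remove_from_waiters_ms() must not try to
	   take it again) */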
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) error = remove_from_waiters_ms(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) __receive_convert_reply(r, lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) error = find_lkb(ls, ms->m_remid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) _receive_convert_reply(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) struct dlm_rsb *r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) error = validate_message(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) /* stub reply can happen with waiters_mutex held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) error = remove_from_waiters_ms(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) /* this is the value returned from do_unlock() on the master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) switch (ms->m_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) case -DLM_EUNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) receive_flags_reply(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) remove_lock_pc(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) queue_cast(r, lkb, -DLM_EUNLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) case -ENOENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) log_error(r->res_ls, "receive_unlock_reply %x error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) lkb->lkb_id, ms->m_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) error = find_lkb(ls, ms->m_remid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) _receive_unlock_reply(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) struct dlm_rsb *r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) error = validate_message(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) /* stub reply can happen with waiters_mutex held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) error = remove_from_waiters_ms(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) /* this is the value returned from do_cancel() on the master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) switch (ms->m_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) case -DLM_ECANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) receive_flags_reply(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) revert_lock_pc(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) queue_cast(r, lkb, -DLM_ECANCEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) log_error(r->res_ls, "receive_cancel_reply %x error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) lkb->lkb_id, ms->m_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) error = find_lkb(ls, ms->m_remid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) _receive_cancel_reply(lkb, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) int error, ret_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) int do_lookup_list = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) error = find_lkb(ls, ms->m_lkid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) /* ms->m_result is the value returned by dlm_master_lookup on the dir node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) FIXME: will a non-zero error ever be returned? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) ret_nodeid = ms->m_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) /* We sometimes receive a request from the dir node for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) rsb before we've received the dir node's lookup_reply for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) The request from the dir node implies we're the master, so we set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) ourselves as master in receive_request_reply, and verify here that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) we are indeed the master. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) /* This should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) log_error(ls, "receive_lookup_reply %x from %d ret %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) "master %d dir %d our %d first %x %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) r->res_master_nodeid, r->res_dir_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) dlm_our_nodeid(), r->res_first_lkid, r->res_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) if (ret_nodeid == dlm_our_nodeid()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) r->res_master_nodeid = ret_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) r->res_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) do_lookup_list = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) r->res_first_lkid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) } else if (ret_nodeid == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) /* the remote node doesn't believe it's the dir node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) lkb->lkb_id, ms->m_header.h_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) r->res_master_nodeid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) r->res_nodeid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) lkb->lkb_nodeid = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) /* set_master() will set lkb_nodeid from r */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) r->res_master_nodeid = ret_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) r->res_nodeid = ret_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) if (is_overlap(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) log_debug(ls, "receive_lookup_reply %x unlock %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) lkb->lkb_id, lkb->lkb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) queue_cast_overlap(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) unhold_lkb(lkb); /* undoes create_lkb() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) goto out_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) _request_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) out_list:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) if (do_lookup_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) process_lookup_list(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) uint32_t saved_seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) int error = 0, noent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) log_limit(ls, "receive %d from non-member %d %x %x %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) ms->m_remid, ms->m_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) switch (ms->m_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) /* messages sent to a master node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) case DLM_MSG_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) error = receive_request(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) case DLM_MSG_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) error = receive_convert(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) case DLM_MSG_UNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) error = receive_unlock(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) case DLM_MSG_CANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) noent = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) error = receive_cancel(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) /* messages sent from a master node (replies to above) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) case DLM_MSG_REQUEST_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) error = receive_request_reply(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) case DLM_MSG_CONVERT_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) error = receive_convert_reply(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) case DLM_MSG_UNLOCK_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) error = receive_unlock_reply(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) case DLM_MSG_CANCEL_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) error = receive_cancel_reply(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) /* messages sent from a master node (only two types of async msg) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) case DLM_MSG_GRANT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) noent = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) error = receive_grant(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) case DLM_MSG_BAST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) noent = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) error = receive_bast(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) /* messages sent to a dir node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) case DLM_MSG_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) receive_lookup(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) case DLM_MSG_REMOVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) receive_remove(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) /* messages sent from a dir node (remove has no reply) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) case DLM_MSG_LOOKUP_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) receive_lookup_reply(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) /* other messages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) case DLM_MSG_PURGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) receive_purge(ls, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) log_error(ls, "unknown message type %d", ms->m_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) * When checking for ENOENT, we're checking the result of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) * find_lkb(m_remid):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) * The lock id referenced in the message wasn't found.  This can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) * happen in normal usage for the async messages (grant, bast) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) * for cancel, so only log_debug is used for those.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) * For any other message type a missing lkid is logged as an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) if (error == -ENOENT && noent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) ms->m_lkid, saved_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) } else if (error == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) ms->m_lkid, saved_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) if (ms->m_type == DLM_MSG_CONVERT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) dlm_dump_rsb_hash(ls, ms->m_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) if (error == -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) log_error(ls, "receive %d inval from %d lkid %x remid %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) "saved_seq %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) ms->m_type, ms->m_header.h_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) ms->m_lkid, ms->m_remid, saved_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) /* If the lockspace is in recovery mode (locking stopped), then normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) messages are saved on the requestqueue for processing after recovery is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) done. When not in recovery mode, we wait for dlm_recoverd to drain saved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) messages off the requestqueue before we process new ones. This occurs right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) after recovery completes when we transition from saving all messages on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) requestqueue, to processing all the saved messages, to processing new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) messages as they arrive. */
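
/* In short, dlm_receive_message() below does one of three things with an
   incoming message:

	locking stopped, ls_generation == 0:  drop it (stale traffic from
	                                      before we (re)joined)
	locking stopped otherwise:            save it on the requestqueue
	locking running:                      wait for the requestqueue to
	                                      drain, then _receive_message()
*/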
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) int nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) if (dlm_locking_stopped(ls)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) /* If we were a member of this lockspace, left, and rejoined,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) other nodes may still be sending us messages from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) lockspace generation before we left. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) if (!ls->ls_generation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) log_limit(ls, "receive %d from %d ignore old gen",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) ms->m_type, nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) dlm_add_requestqueue(ls, nodeid, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) dlm_wait_requestqueue(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) _receive_message(ls, ms, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) /* This is called by dlm_recoverd to process messages that were saved on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) the requestqueue. */
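
/* saved_seq is presumably the recovery sequence that was current when the
   message was queued on the requestqueue; within _receive_message() it is
   only used to annotate the ENOENT/EINVAL log messages. */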
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) uint32_t saved_seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) _receive_message(ls, ms, saved_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) /* This is called by the midcomms layer when something is received for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) the lockspace. It could be either a MSG (normal message sent as part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) standard locking activity) or an RCOM (recovery message sent as part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) lockspace recovery). */
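
/* A minimal sketch of a midcomms-style caller handing a complete packet to
   this function (illustrative only; the function name below is hypothetical,
   not the real midcomms code):

	static void example_deliver_packet(union dlm_packet *p, int nodeid)
	{
		dlm_receive_buffer(p, nodeid);
	}

   dlm_receive_buffer() then validates h_cmd and h_nodeid, looks up the
   lockspace by its global id (telling the sender we're not ready if an
   RCOM_STATUS arrives for an unknown lockspace), and holds ls_recv_active
   for read around the message/rcom processing. */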
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) void dlm_receive_buffer(union dlm_packet *p, int nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) struct dlm_header *hd = &p->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) struct dlm_ls *ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) int type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) switch (hd->h_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) case DLM_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) dlm_message_in(&p->message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) type = p->message.m_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) case DLM_RCOM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) dlm_rcom_in(&p->rcom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) type = p->rcom.rc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) if (hd->h_nodeid != nodeid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) log_print("invalid h_nodeid %d from %d lockspace %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) hd->h_nodeid, nodeid, hd->h_lockspace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) ls = dlm_find_lockspace_global(hd->h_lockspace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) if (!ls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) if (dlm_config.ci_log_debug) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) "%u from %d cmd %d type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) hd->h_lockspace, nodeid, hd->h_cmd, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) dlm_send_ls_not_ready(nodeid, &p->rcom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) be inactive (in this ls) before transitioning to recovery mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) down_read(&ls->ls_recv_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) if (hd->h_cmd == DLM_MSG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) dlm_receive_message(ls, &p->message, nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) dlm_receive_rcom(ls, &p->rcom, nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) up_read(&ls->ls_recv_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) dlm_put_lockspace(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) struct dlm_message *ms_stub)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) if (middle_conversion(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) hold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) memset(ms_stub, 0, sizeof(struct dlm_message));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) ms_stub->m_flags = DLM_IFL_STUB_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) ms_stub->m_result = -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) _receive_convert_reply(lkb, ms_stub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) /* Same special case as in receive_rcom_lock_args() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) lkb->lkb_grmode = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) unhold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) lkb->lkb_flags |= DLM_IFL_RESEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) conversions are async; there's no reply from the remote master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) /* A waiting lkb needs recovery if the master node has failed, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) the master node is changing (only when no directory is used) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) int dir_nodeid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) if (dlm_no_directory(ls))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) /* Recovery for locks that are waiting for replies from nodes that are now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) gone. We can just complete unlocks and cancels by faking a reply from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) dead node. Requests and up-conversions we flag to be resent after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) recovery. Down-conversions can just be completed with a fake reply like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) unlocks. Conversions between PR and CW need special attention. */
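
/* A summary of the per-wait_type handling in dlm_recover_waiters_pre()
   below (an overlap unlock/cancel stands in for a zero wait_type first):

	DLM_MSG_LOOKUP   resend after recovery (always, even when the
	                 waiter doesn't otherwise need recovery)
	DLM_MSG_REQUEST  mark for resend
	DLM_MSG_CONVERT  recover_convert_waiter(): fake an -EINPROGRESS
	                 reply for PR<->CW conversions, mark up-conversions
	                 for resend, do nothing for down-conversions
	DLM_MSG_UNLOCK   fake an unlock reply (-DLM_EUNLOCK, or -ENOENT for
	                 an overlapped unlock of a never-granted lkb)
	DLM_MSG_CANCEL   fake a cancel reply (-DLM_ECANCEL, or 0 for an
	                 overlapped cancel of a never-granted lkb)
*/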
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) void dlm_recover_waiters_pre(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) struct dlm_lkb *lkb, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) struct dlm_message *ms_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) int wait_type, stub_unlock_result, stub_cancel_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) int dir_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) if (!ms_stub)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) mutex_lock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) /* exclude debug messages about unlocks because there can be so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) many and they aren't very interesting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) lkb->lkb_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) lkb->lkb_remid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) lkb->lkb_wait_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) lkb->lkb_resource->res_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) lkb->lkb_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) lkb->lkb_wait_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) dir_nodeid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) /* all outstanding lookups, regardless of destination, will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) resent after recovery is done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) lkb->lkb_flags |= DLM_IFL_RESEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) wait_type = lkb->lkb_wait_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) stub_unlock_result = -DLM_EUNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) stub_cancel_result = -DLM_ECANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) /* Main reply may have been received leaving a zero wait_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) but a reply for the overlapping op may not have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) received. In that case we need to fake the appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) reply for the overlap op. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) if (!wait_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) if (is_overlap_cancel(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) wait_type = DLM_MSG_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) if (lkb->lkb_grmode == DLM_LOCK_IV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) stub_cancel_result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) if (is_overlap_unlock(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) wait_type = DLM_MSG_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) if (lkb->lkb_grmode == DLM_LOCK_IV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) stub_unlock_result = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) log_debug(ls, "rwpre overlap %x %x %d %d %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) lkb->lkb_id, lkb->lkb_flags, wait_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) stub_cancel_result, stub_unlock_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) switch (wait_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) case DLM_MSG_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) lkb->lkb_flags |= DLM_IFL_RESEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) case DLM_MSG_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) recover_convert_waiter(ls, lkb, ms_stub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) case DLM_MSG_UNLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) hold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) memset(ms_stub, 0, sizeof(struct dlm_message));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) ms_stub->m_flags = DLM_IFL_STUB_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) ms_stub->m_result = stub_unlock_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) _receive_unlock_reply(lkb, ms_stub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) case DLM_MSG_CANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) hold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) memset(ms_stub, 0, sizeof(struct dlm_message));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) ms_stub->m_flags = DLM_IFL_STUB_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) ms_stub->m_result = stub_cancel_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) _receive_cancel_reply(lkb, ms_stub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) log_error(ls, "invalid lkb wait_type %d %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) lkb->lkb_wait_type, wait_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) mutex_unlock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) kfree(ms_stub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) mutex_lock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) if (lkb->lkb_flags & DLM_IFL_RESEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) hold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) mutex_unlock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) lkb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) return lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) /* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) master or dir-node for r. Processing the lkb may result in it being placed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) back on waiters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) /* We do this after normal locking has been enabled and any saved messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) (in requestqueue) have been processed. We should be confident that at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) this point we won't get or process a reply to any of these waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) operations. But, new ops may be coming in on the rsbs/locks here from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) userspace or remotely. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) /* There may have been an overlap unlock/cancel prior to recovery or after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) recovery.  If before, the lkb may still have a positive wait_count; if after,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) the overlap flag would just have been set and nothing new sent.  We can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) confident here that any replies to either the initial op or overlap ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) prior to recovery have been received. */
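
/* For each RESEND waiter found below, the RESEND/OVERLAP flags are cleared
   and the lkb is taken off the waiters list, then:

	with an overlapped unlock/cancel pending:
		LOOKUP/REQUEST  complete locally via queue_cast()
		                (-DLM_EUNLOCK or -DLM_ECANCEL) and drop the
		                reference taken by create_lkb()
		CONVERT         cancel: queue_cast(-DLM_ECANCEL);
		                unlock: force-unlock via _unlock_lock()
	with no overlap:
		LOOKUP/REQUEST  restart at _request_lock() (plus
		                confirm_master() if we're the master)
		CONVERT         restart at _convert_lock()
*/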
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) int dlm_recover_waiters_post(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) int error = 0, mstype, err, oc, ou;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) if (dlm_locking_stopped(ls)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) log_debug(ls, "recover_waiters_post aborted");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) error = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) lkb = find_resend_waiter(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) if (!lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) mstype = lkb->lkb_wait_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) oc = is_overlap_cancel(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) ou = is_overlap_unlock(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) dlm_dir_nodeid(r), oc, ou);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) /* At this point we assume that we won't get a reply to any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) previous op or overlap op on this lock.  First, do the equivalent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) of a big remove_from_waiters() for all previous ops. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) lkb->lkb_flags &= ~DLM_IFL_RESEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) lkb->lkb_wait_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) lkb->lkb_wait_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) mutex_lock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) list_del_init(&lkb->lkb_wait_reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) mutex_unlock(&ls->ls_waiters_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) unhold_lkb(lkb); /* for waiters list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) if (oc || ou) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) /* do an unlock or cancel instead of resending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) switch (mstype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) case DLM_MSG_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) case DLM_MSG_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) -DLM_ECANCEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) unhold_lkb(lkb); /* undoes create_lkb() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) case DLM_MSG_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) if (oc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) queue_cast(r, lkb, -DLM_ECANCEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) _unlock_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) err = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) switch (mstype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) case DLM_MSG_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) case DLM_MSG_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) _request_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) if (is_master(r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) confirm_master(r, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) case DLM_MSG_CONVERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) _convert_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) err = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) log_error(ls, "waiter %x msg %d r_nodeid %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) "dir_nodeid %d overlap %d %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) lkb->lkb_id, mstype, r->res_nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) dlm_dir_nodeid(r), oc, ou);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) struct dlm_lkb *lkb, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) if (!is_master_copy(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) /* don't purge lkbs we've added in recover_master_copy for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) the current recovery seq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) if (lkb->lkb_recover_seq == ls->ls_recover_seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) del_lkb(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) /* this put should free the lkb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) if (!dlm_put_lkb(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) log_error(ls, "purged mstcpy lkb not released");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) struct dlm_ls *ls = r->res_ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) purge_mstcpy_list(ls, r, &r->res_grantqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) purge_mstcpy_list(ls, r, &r->res_convertqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) purge_mstcpy_list(ls, r, &r->res_waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) struct list_head *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) int nodeid_gone, unsigned int *count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) struct dlm_lkb *lkb, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) if (!is_master_copy(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) if ((lkb->lkb_nodeid == nodeid_gone) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) dlm_is_removed(ls, lkb->lkb_nodeid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) /* tell recover_lvb to invalidate the lvb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) because a node holding EX/PW failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) (lkb->lkb_grmode >= DLM_LOCK_PW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) del_lkb(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) /* this put should free the lkb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) if (!dlm_put_lkb(lkb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) log_error(ls, "purged dead lkb not released");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) rsb_set_flag(r, RSB_RECOVER_GRANT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) (*count)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) /* Get rid of locks held by nodes that are gone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) void dlm_recover_purge(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) struct dlm_member *memb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) int nodes_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) int nodeid_gone = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) unsigned int lkb_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) /* cache one removed nodeid to optimize the common
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) case of a single node removed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) nodes_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) nodeid_gone = memb->nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) if (!nodes_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) down_write(&ls->ls_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) if (is_master(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) purge_dead_list(ls, r, &r->res_grantqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) nodeid_gone, &lkb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) purge_dead_list(ls, r, &r->res_convertqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) nodeid_gone, &lkb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) purge_dead_list(ls, r, &r->res_waitqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) nodeid_gone, &lkb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) unhold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) up_write(&ls->ls_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) if (lkb_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) lkb_count, nodes_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) spin_lock(&ls->ls_rsbtbl[bucket].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) r = rb_entry(n, struct dlm_rsb, res_hashnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) if (!rsb_flag(r, RSB_RECOVER_GRANT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) if (!is_master(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) rsb_clear_flag(r, RSB_RECOVER_GRANT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) spin_unlock(&ls->ls_rsbtbl[bucket].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) spin_unlock(&ls->ls_rsbtbl[bucket].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) * Attempt to grant locks on resources that we are the master of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) * Locks may have become grantable during recovery because locks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) * from departed nodes have been purged (or not rebuilt), allowing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) * previously blocked locks to now be granted. The subset of rsb's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) * we are interested in are those with lkb's on either the convert or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) * waiting queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) * Simplest would be to go through each master rsb and check for non-empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) * convert or waiting queues, and attempt to grant on those rsbs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) * Checking the queues requires lock_rsb, though, for which we'd need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) * to release the rsbtbl lock. This would make iterating through all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) * rsb's very inefficient. So, we rely on earlier recovery routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) * to set RECOVER_GRANT on any rsb's that we should attempt to grant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) * locks for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) */
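
/* The loop below walks the rsb table one bucket at a time.  find_grant_rsb()
   rescans the bucket from rb_first() on every call, which terminates because
   every rsb it returns (or skips as a non-master) has RSB_RECOVER_GRANT
   cleared before that bucket is scanned again. */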
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) void dlm_recover_grant(struct dlm_ls *ls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) int bucket = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) unsigned int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) unsigned int rsb_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) unsigned int lkb_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) r = find_grant_rsb(ls, bucket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) if (!r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) if (bucket == ls->ls_rsbtbl_size - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) bucket++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) rsb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) /* the RECOVER_GRANT flag is checked in the grant path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) grant_pending_locks(r, &count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) rsb_clear_flag(r, RSB_RECOVER_GRANT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) lkb_count += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) confirm_master(r, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) if (lkb_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) lkb_count, rsb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553)
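/* An lkb sent by another node is identified by that node's nodeid and lock
   id (remid).  These helpers look for an existing master-copy lkb with a
   matching nodeid/remid on any of the rsb's three queues, so an lkb already
   rebuilt by a previous, aborted recovery isn't added a second time. */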
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) uint32_t remid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) list_for_each_entry(lkb, head, lkb_statequeue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) return lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) uint32_t remid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) if (lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) return lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) if (lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) return lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) if (lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) return lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582)
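/* Fill a master-copy lkb from the wire-format rcom_lock: copy the ids,
   flags and modes, mark it MSTCPY, attach the fake ast/bast callbacks as
   placeholders, copy the LVB if DLM_LKF_VALBLK is set (rejecting one longer
   than the lockspace lvblen), and flag in-progress PR<->CW conversions so
   recover_conversion can sort out their real granted mode later. */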
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) /* needs at least dlm_rcom + rcom_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) struct dlm_rsb *r, struct dlm_rcom *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) lkb->lkb_nodeid = rc->rc_header.h_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) lkb->lkb_flags |= DLM_IFL_MSTCPY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) lkb->lkb_rqmode = rl->rl_rqmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) lkb->lkb_grmode = rl->rl_grmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) /* don't set lkb_status because add_lkb wants to set it itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) sizeof(struct rcom_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) if (lvblen > ls->ls_lvblen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) if (!lkb->lkb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) /* Conversions between PR and CW (middle modes) need special handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) The real granted mode of these converting locks cannot be determined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) until all locks have been rebuilt on the rsb (recover_conversion) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) middle_conversion(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) rl->rl_status = DLM_LKSTS_CONVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) lkb->lkb_grmode = DLM_LOCK_IV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) rsb_set_flag(r, RSB_RECOVER_CONVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) /* This lkb may have been recovered in a previous aborted recovery so we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) to check if the rsb already has an lkb with the given remote nodeid/lkid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) If so we just send back a standard reply. If not, we create a new lkb with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) the given values and send back our lkid. We send back our lkid by sending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) back the rcom_lock struct we got but with the remid field filled in. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) /* needs at least dlm_rcom + rcom_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) uint32_t remid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) int from_nodeid = rc->rc_header.h_nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) if (rl->rl_parent_lkid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) error = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) remid = le32_to_cpu(rl->rl_lkid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) /* In general we expect the rsb returned to be R_MASTER, but we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) have to require it. Recovery of masters on one node can overlap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) recovery of locks on another node, so one node can send us MSTCPY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) locks before we've made ourselves master of this rsb. We can still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) add new MSTCPY locks that we receive here without any harm; when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) we make ourselves master, dlm_recover_masters() won't touch the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) MSTCPY locks we've received early. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) from_nodeid, R_RECEIVE_RECOVER, &r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) from_nodeid, remid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) error = -EBADR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) lkb = search_remid(r, from_nodeid, remid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) if (lkb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) error = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) goto out_remid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) error = create_lkb(ls, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) error = receive_rcom_lock_args(ls, lkb, r, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) __put_lkb(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) attach_lkb(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) add_lkb(r, lkb, rl->rl_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) ls->ls_recover_locks_in++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) rsb_set_flag(r, RSB_RECOVER_GRANT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) out_remid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) /* this is the new value returned to the lock holder for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) saving in its process-copy lkb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) rl->rl_remid = cpu_to_le32(lkb->lkb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) lkb->lkb_recover_seq = ls->ls_recover_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) if (error && error != -EEXIST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) from_nodeid, remid, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) rl->rl_result = cpu_to_le32(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) /* needs at least dlm_rcom + rcom_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) uint32_t lkid, remid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) int error, result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) lkid = le32_to_cpu(rl->rl_lkid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) remid = le32_to_cpu(rl->rl_remid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) result = le32_to_cpu(rl->rl_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) error = find_lkb(ls, lkid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) lkid, rc->rc_header.h_nodeid, remid, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) if (!is_process_copy(lkb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) lkid, rc->rc_header.h_nodeid, remid, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) dlm_dump_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) switch (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) case -EBADR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) /* There's a chance the new master received our lock before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) dlm_recover_master_reply(), this wouldn't happen if we did
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) a barrier between recover_masters and recover_locks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) lkid, rc->rc_header.h_nodeid, remid, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) dlm_send_rcom_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) case -EEXIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) lkb->lkb_remid = remid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) lkid, rc->rc_header.h_nodeid, remid, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) /* an ack for dlm_recover_locks() which waits for replies from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) all the locks it sends to new masters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) dlm_recovered_lock(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) int mode, uint32_t flags, void *name, unsigned int namelen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) unsigned long timeout_cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) struct dlm_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) dlm_lock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) error = create_lkb(ls, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) kfree(ua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) if (flags & DLM_LKF_VALBLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) if (!ua->lksb.sb_lvbptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) kfree(ua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) __put_lkb(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) fake_astfn, ua, fake_bastfn, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) kfree(ua->lksb.sb_lvbptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) ua->lksb.sb_lvbptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) kfree(ua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) __put_lkb(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) /* After ua is attached to lkb it will be freed by dlm_free_lkb().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) When DLM_IFL_USER is set, the dlm knows that this is a userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) lock and that lkb_astparam is the dlm_user_args structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) lkb->lkb_flags |= DLM_IFL_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) error = request_lock(ls, lkb, name, namelen, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) switch (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) case -EINPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) __put_lkb(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) /* add this new lkb to the per-process list of locks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) spin_lock(&ua->proc->locks_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) hold_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) spin_unlock(&ua->proc->locks_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) dlm_unlock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) unsigned long timeout_cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) struct dlm_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) struct dlm_user_args *ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) dlm_lock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) error = find_lkb(ls, lkid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) /* user can change the params on its lock when it converts it, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) add an lvb that didn't exist before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) ua = lkb->lkb_ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) if (!ua->lksb.sb_lvbptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) if (lvb_in && ua->lksb.sb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) ua->xid = ua_tmp->xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) ua->castparam = ua_tmp->castparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) ua->castaddr = ua_tmp->castaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) ua->bastparam = ua_tmp->bastparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) ua->bastaddr = ua_tmp->bastaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) ua->user_lksb = ua_tmp->user_lksb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) fake_astfn, ua, fake_bastfn, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) error = convert_lock(ls, lkb, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) dlm_unlock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) kfree(ua_tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) * The caller asks for an orphan lock on a given resource with a given mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) * If a matching lock exists, it's moved to the owner's list of locks and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) * the lkid is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) int mode, uint32_t flags, void *name, unsigned int namelen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) unsigned long timeout_cs, uint32_t *lkid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) struct dlm_user_args *ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) int found_other_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) mutex_lock(&ls->ls_orphans_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) if (lkb->lkb_resource->res_length != namelen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) if (memcmp(lkb->lkb_resource->res_name, name, namelen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) if (lkb->lkb_grmode != mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) found_other_mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) list_del_init(&lkb->lkb_ownqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) *lkid = lkb->lkb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) mutex_unlock(&ls->ls_orphans_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) if (!found && found_other_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) rv = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) if (!found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) rv = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) lkb->lkb_exflags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) lkb->lkb_ownpid = (int) current->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) ua = lkb->lkb_ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) ua->proc = ua_tmp->proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) ua->xid = ua_tmp->xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) ua->castparam = ua_tmp->castparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) ua->castaddr = ua_tmp->castaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) ua->bastparam = ua_tmp->bastparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) ua->bastaddr = ua_tmp->bastaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) ua->user_lksb = ua_tmp->user_lksb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) * The lkb reference from the ls_orphans list was not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) * removed above, and is now considered the reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) * for the proc locks list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) spin_lock(&ua->proc->locks_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) spin_unlock(&ua->proc->locks_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) kfree(ua_tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) uint32_t flags, uint32_t lkid, char *lvb_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) struct dlm_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) struct dlm_user_args *ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) dlm_lock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) error = find_lkb(ls, lkid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) ua = lkb->lkb_ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) if (lvb_in && ua->lksb.sb_lvbptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) if (ua_tmp->castparam)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) ua->castparam = ua_tmp->castparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) ua->user_lksb = ua_tmp->user_lksb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) error = set_unlock_args(flags, ua, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) error = unlock_lock(ls, lkb, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) if (error == -DLM_EUNLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) /* from validate_unlock_args() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) spin_lock(&ua->proc->locks_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) /* dlm_user_add_cb() may have already taken lkb off the proc list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) if (!list_empty(&lkb->lkb_ownqueue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) spin_unlock(&ua->proc->locks_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) dlm_unlock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) kfree(ua_tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) uint32_t flags, uint32_t lkid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) struct dlm_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) struct dlm_user_args *ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) dlm_lock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) error = find_lkb(ls, lkid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) ua = lkb->lkb_ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) if (ua_tmp->castparam)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) ua->castparam = ua_tmp->castparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) ua->user_lksb = ua_tmp->user_lksb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) error = set_unlock_args(flags, ua, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) error = cancel_lock(ls, lkb, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) if (error == -DLM_ECANCEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) /* from validate_unlock_args() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) if (error == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) dlm_unlock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) kfree(ua_tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) struct dlm_lkb *lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) struct dlm_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) struct dlm_user_args *ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) struct dlm_rsb *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) dlm_lock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) error = find_lkb(ls, lkid, &lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) ua = lkb->lkb_ua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) error = set_unlock_args(flags, ua, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) /* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) r = lkb->lkb_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) hold_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) lock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) error = validate_unlock_args(lkb, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) goto out_r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) error = _cancel_lock(r, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) out_r:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) unlock_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) put_rsb(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) if (error == -DLM_ECANCEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) /* from validate_unlock_args() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) if (error == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) dlm_unlock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) /* lkb's that are removed from the waiters list by revert are just left on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) orphans list with the granted orphan locks, to be freed by purge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) struct dlm_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) hold_lkb(lkb); /* reference for the ls_orphans list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) mutex_lock(&ls->ls_orphans_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) mutex_unlock(&ls->ls_orphans_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) set_unlock_args(0, lkb->lkb_ua, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) error = cancel_lock(ls, lkb, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) if (error == -DLM_ECANCEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) /* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) granted. Regardless of what rsb queue the lock is on, it's removed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) freed. The IVVALBLK flag causes the lvb on the resource to be invalidated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) if our lock is PW/EX (it's ignored if our granted mode is smaller.) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) struct dlm_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) lkb->lkb_ua, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) error = unlock_lock(ls, lkb, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) if (error == -DLM_EUNLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) /* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) (which does lock_rsb) due to deadlock with receiving a message that does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) lock_rsb followed by dlm_user_add_cb() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) struct dlm_user_proc *proc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) struct dlm_lkb *lkb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) mutex_lock(&ls->ls_clear_proc_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) if (list_empty(&proc->locks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) list_del_init(&lkb->lkb_ownqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) lkb->lkb_flags |= DLM_IFL_ORPHAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) lkb->lkb_flags |= DLM_IFL_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) mutex_unlock(&ls->ls_clear_proc_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) return lkb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) which we clear here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) /* proc CLOSING flag is set so no more device_reads should look at proc->asts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) list, and no more device_writes should add lkb's to proc->locks list; so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177) shouldn't need to take asts_spin or locks_spin here. this assumes that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) device reads/writes/closes are serialized -- FIXME: we may need to serialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) them ourself. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) struct dlm_lkb *lkb, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) dlm_lock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) lkb = del_proc_lock(ls, proc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) if (!lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) del_timeout(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) orphan_proc_lock(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) unlock_proc_lock(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) /* this removes the reference for the proc->locks list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) added by dlm_user_request, it may result in the lkb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) being freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) mutex_lock(&ls->ls_clear_proc_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) /* in-progress unlocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) list_del_init(&lkb->lkb_ownqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) lkb->lkb_flags |= DLM_IFL_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) memset(&lkb->lkb_callbacks, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) list_del_init(&lkb->lkb_cb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) mutex_unlock(&ls->ls_clear_proc_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) dlm_unlock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) struct dlm_lkb *lkb, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) lkb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) spin_lock(&proc->locks_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) if (!list_empty(&proc->locks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) lkb = list_entry(proc->locks.next, struct dlm_lkb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) lkb_ownqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) list_del_init(&lkb->lkb_ownqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) spin_unlock(&proc->locks_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) if (!lkb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) lkb->lkb_flags |= DLM_IFL_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) unlock_proc_lock(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) dlm_put_lkb(lkb); /* ref from proc->locks list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) spin_lock(&proc->locks_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) list_del_init(&lkb->lkb_ownqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) lkb->lkb_flags |= DLM_IFL_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) spin_unlock(&proc->locks_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) spin_lock(&proc->asts_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) memset(&lkb->lkb_callbacks, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) list_del_init(&lkb->lkb_cb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) spin_unlock(&proc->asts_spin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) /* pid of 0 means purge all orphans */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) struct dlm_lkb *lkb, *safe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) mutex_lock(&ls->ls_orphans_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) if (pid && lkb->lkb_ownpid != pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) unlock_proc_lock(ls, lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) list_del_init(&lkb->lkb_ownqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) dlm_put_lkb(lkb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) mutex_unlock(&ls->ls_orphans_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) struct dlm_message *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) struct dlm_mhandle *mh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) error = _create_message(ls, sizeof(struct dlm_message), nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) DLM_MSG_PURGE, &ms, &mh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) ms->m_nodeid = nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) ms->m_pid = pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) return send_message(mh, ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) int nodeid, int pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) if (nodeid && (nodeid != dlm_our_nodeid())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) error = send_purge(ls, nodeid, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) dlm_lock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) if (pid == current->pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) purge_proc_locks(ls, proc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) do_purge(ls, nodeid, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) dlm_unlock_recovery(ls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314)