// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include "internal.h"

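/*
 * States of a lock waiter, kept in fl->fl_u.afs.state.  A negative errno may
 * also be stored there to tell a waiter that its request failed (see
 * afs_next_locker() and afs_kill_lockers_enoent()).
 */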
#define AFS_LOCK_GRANTED 0
#define AFS_LOCK_PENDING 1
#define AFS_LOCK_YOUR_TRY 2

struct workqueue_struct *afs_lock_manager;

static void afs_next_locker(struct afs_vnode *vnode, int error);
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock = afs_fl_copy_lock,
	.fl_release_private = afs_fl_release_private,
};

static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
{
	_debug("STATE %u -> %u", vnode->lock_state, state);
	vnode->lock_state = state;
}

static atomic_t afs_file_lock_debug_id;

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);
	if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
		afs_next_locker(vnode, 0);
	trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
	spin_unlock(&vnode->lock);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension at roughly the halfway point of that period
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	ktime_t expires_at, now, duration;
	u64 duration_j;

	expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2);
	now = ktime_get_real();
	duration = ktime_sub(expires_at, now);
	if (duration <= 0)
		duration_j = 0;
	else
		duration_j = nsecs_to_jiffies(ktime_to_ns(duration));

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
}

/*
 * In the case of successful completion of a lock operation, record the time
 * the reply appeared and start the lock extension timer.
 */
void afs_lock_op_done(struct afs_call *call)
{
	struct afs_operation *op = call->op;
	struct afs_vnode *vnode = op->file[0].vnode;

	if (call->error == 0) {
		spin_lock(&vnode->lock);
		trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
		vnode->locked_at = call->reply_time;
		afs_schedule_lock_extension(vnode);
		spin_unlock(&vnode->lock);
	}
}

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode)
{
	struct file_lock *p, *_p;
	bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);

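	/* If we hold the server lock exclusively, wake all the waiters and
	 * let the VFS arbitrate between them locally; otherwise only the
	 * waiting readlocks can share the server readlock.
	 */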
	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (!exclusive && p->fl_type == F_WRLCK)
			continue;

		list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
		p->fl_u.afs.state = AFS_LOCK_GRANTED;
		trace_afs_flock_op(vnode, p, afs_flock_op_grant);
		wake_up(&p->fl_wait);
	}
}

/*
 * If an error is specified, reject every pending lock that matches the
 * authentication and type of the lock we failed to get. If there are any
 * remaining lockers, try to wake up one of them to have a go.
 */
static void afs_next_locker(struct afs_vnode *vnode, int error)
{
	struct file_lock *p, *_p, *next = NULL;
	struct key *key = vnode->lock_key;
	unsigned int fl_type = F_RDLCK;

	_enter("");

	if (vnode->lock_type == AFS_LOCK_WRITE)
		fl_type = F_WRLCK;

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (error &&
		    p->fl_type == fl_type &&
		    afs_file_key(p->fl_file) == key) {
			list_del_init(&p->fl_u.afs.link);
			p->fl_u.afs.state = error;
			wake_up(&p->fl_wait);
		}

		/* Select the next locker to hand off to: the first waiting
		 * writer if there is one, otherwise the frontmost waiter.
		 */
		if (next &&
		    (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))
			continue;
		next = p;
	}

	vnode->lock_key = NULL;
	key_put(key);

	if (next) {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
		next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
		trace_afs_flock_op(vnode, next, afs_flock_op_wake);
		wake_up(&next->fl_wait);
	} else {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
		trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
	}

	_leave("");
}

/*
 * Kill off all waiters in the pending lock queue due to the vnode being
 * deleted.
 */
static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
{
	struct file_lock *p;

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED);

	while (!list_empty(&vnode->pending_locks)) {
		p = list_entry(vnode->pending_locks.next,
			       struct file_lock, fl_u.afs.link);
		list_del_init(&p->fl_u.afs.link);
		p->fl_u.afs.state = -ENOENT;
		wake_up(&p->fl_wait);
	}

	key_put(vnode->lock_key);
	vnode->lock_key = NULL;
}

static void afs_lock_success(struct afs_operation *op)
{
	_enter("op=%08x", op->debug_id);
	afs_vnode_commit_status(op, &op->file[0]);
}

static const struct afs_operation_ops afs_set_lock_operation = {
	.issue_afs_rpc = afs_fs_set_lock,
	.issue_yfs_rpc = yfs_fs_set_lock,
	.success = afs_lock_success,
	.aborted = afs_check_for_remote_deletion,
};

/*
 * Get a lock on a file
 */
static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
			afs_lock_type_t type)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x,%u",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key), type);

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->lock.type = type;
	op->ops = &afs_set_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_extend_lock_operation = {
	.issue_afs_rpc = afs_fs_extend_lock,
	.issue_yfs_rpc = yfs_fs_extend_lock,
	.success = afs_lock_success,
};

/*
 * Extend a lock on a file
 */
static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_extend_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_release_lock_operation = {
	.issue_afs_rpc = afs_fs_release_lock,
	.issue_yfs_rpc = yfs_fs_release_lock,
	.success = afs_lock_success,
};

/*
 * Release a lock on a file
 */
static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_release_lock_operation;
	return afs_do_sync_operation(op);
}

/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct key *key;
	int ret;

	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

again:
	_debug("wstate %u for %p", vnode->lock_state, vnode);
	switch (vnode->lock_state) {
	case AFS_VNODE_LOCK_NEED_UNLOCK:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll expire anyway */
		ret = afs_release_lock(vnode, vnode->lock_key);
		if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail,
					   ret);
			pr_warn("AFS: Failed to release lock on {%llx:%llx} error %d\n",
				vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);
		if (ret == -ENOENT)
			afs_kill_lockers_enoent(vnode);
		else
			afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	/* If we've already got a lock, then it must be time to extend that
	 * lock as AFS locks time out after 5 minutes.
	 */
	case AFS_VNODE_LOCK_GRANTED:
		_debug("extend");

		ASSERT(!list_empty(&vnode->granted_locks));

		key = key_get(vnode->lock_key);
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
		spin_unlock(&vnode->lock);

		ret = afs_extend_lock(vnode, key); /* RPC */
		key_put(key);

		if (ret < 0) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
					   ret);
			pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
				vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);

		if (ret == -ENOENT) {
			afs_kill_lockers_enoent(vnode);
			spin_unlock(&vnode->lock);
			return;
		}

		if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
			goto again;
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);

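		/* On success, afs_lock_op_done() will already have scheduled
		 * the next extension from the reply time; on failure, retry
		 * the extension in ten seconds.
		 */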
		if (ret != 0)
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
		spin_unlock(&vnode->lock);
		_leave(" [ext]");
		return;

	/* If we're waiting for a callback to indicate lock release, we can't
	 * actually rely on this, so need to recheck at regular intervals. The
	 * problem is that the server might not notify us if the lock just
	 * expires (say because a client died) rather than being explicitly
	 * released.
	 */
	case AFS_VNODE_LOCK_WAITING_FOR_CB:
		_debug("retry");
		afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	case AFS_VNODE_LOCK_DELETED:
		afs_kill_lockers_enoent(vnode);
		spin_unlock(&vnode->lock);
		return;

	default:
		/* Looks like a lock request was withdrawn. */
		spin_unlock(&vnode->lock);
		_leave(" [no]");
		return;
	}
}

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode)
{
	_enter("%u", vnode->lock_state);

	if (list_empty(&vnode->granted_locks) &&
	    (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
	     vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
		cancel_delayed_work(&vnode->lock_work);

		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
		trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
		queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
	}
}

/*
 * Check that our view of the file metadata is up to date and check to see
 * whether we think that we have a locking permit.
 */
static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
			      enum afs_flock_mode mode, afs_lock_type_t type)
{
	afs_access_t access;
	int ret;

	/* Make sure we've got a callback on this file and that our view of the
	 * data version is up to date.
	 */
	ret = afs_validate(vnode, key);
	if (ret < 0)
		return ret;

	/* Check the permission set to see if we're actually going to be
	 * allowed to get a lock on this file.
	 */
	ret = afs_check_permit(vnode, key, &access);
	if (ret < 0)
		return ret;

	/* At a rough estimation, you need LOCK, WRITE or INSERT perm to
	 * read-lock a file and WRITE or INSERT perm to write-lock a file.
	 *
	 * We can't rely on the server to do this for us since if we want to
	 * share a read lock that we already have, we won't go to the server.
	 */
	if (type == AFS_LOCK_READ) {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
			return -EACCES;
	} else {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
			return -EACCES;
	}

	return 0;
}

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = locks_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode;
	afs_lock_type_t type;
	struct key *key = afs_file_key(file);
	bool partial, no_server_lock = false;
	int ret;

	if (mode == afs_flock_mode_unset)
		mode = afs_flock_mode_openafs;

	_enter("{%llx:%llu},%llu-%llu,%u,%u",
	       vnode->fid.vid, vnode->fid.vnode,
	       fl->fl_start, fl->fl_end, fl->fl_type, mode);

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
	if (mode == afs_flock_mode_write && partial)
		type = AFS_LOCK_WRITE;

	ret = afs_do_setlk_check(vnode, key, mode, type);
	if (ret < 0)
		return ret;

	trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);

	/* AFS3 protocol only supports full-file locks and doesn't provide any
	 * method of upgrade/downgrade, so we need to emulate for partial-file
	 * locks.
	 *
	 * The OpenAFS client only gets a server lock for a full-file lock and
	 * keeps partial-file locks local. Allow this behaviour to be emulated
	 * (as the default).
	 */
	if (mode == afs_flock_mode_local ||
	    (partial && mode == afs_flock_mode_openafs)) {
		no_server_lock = true;
		goto skip_server_lock;
	}

	spin_lock(&vnode->lock);
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);

	ret = -ENOENT;
	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		goto error_unlock;

	/* If we've already got a lock on the server then try to move to having
	 * the VFS grant the requested lock. Note that this means that other
	 * clients may get starved out.
	 */
	_debug("try %u", vnode->lock_state);
	if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
		if (type == AFS_LOCK_READ) {
			_debug("instant readlock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}

		if (vnode->lock_type == AFS_LOCK_WRITE) {
			_debug("instant writelock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}
	}

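	/* For a non-blocking request, check the last-known server lock count
	 * so that we can return -EAGAIN immediately if the file appears to be
	 * locked against us, without making an RPC.
	 */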
	if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
	    !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		if (type == AFS_LOCK_READ) {
			if (vnode->status.lock_count == -1)
				goto lock_is_contended; /* Write locked */
		} else {
			if (vnode->status.lock_count != 0)
				goto lock_is_contended; /* Locked */
		}
	}

	if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
		goto need_to_wait;

try_to_lock:
	/* We don't have a lock on this vnode and we aren't currently waiting
	 * for one either, so ask the server for a lock.
	 *
	 * Note that we need to be careful if we get interrupted by a signal
	 * after dispatching the request as we may still get the lock, even
	 * though we don't wait for the reply (it's not too bad a problem - the
	 * lock will expire in 5 mins anyway).
	 */
	trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
	vnode->lock_key = key_get(key);
	vnode->lock_type = type;
	afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
	spin_unlock(&vnode->lock);

	ret = afs_set_lock(vnode, key, type); /* RPC */

	spin_lock(&vnode->lock);
	switch (ret) {
	case -EKEYREJECTED:
	case -EKEYEXPIRED:
	case -EKEYREVOKED:
	case -EPERM:
	case -EACCES:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, ret);
		goto error_unlock;

	case -ENOENT:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_kill_lockers_enoent(vnode);
		goto error_unlock;

	default:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
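		/* Pass 0 rather than the error so that other pending lockers
		 * aren't failed as well; the next waiter still gets a turn.
		 */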
		afs_next_locker(vnode, 0);
		goto error_unlock;

	case -EWOULDBLOCK:
		/* The server doesn't have a lock-waiting queue, so the client
		 * will have to retry. The server will break the outstanding
		 * callbacks on a file when a lock is released.
		 */
		ASSERT(list_empty(&vnode->granted_locks));
		ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
		goto lock_is_contended;

	case 0:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
		trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
		afs_grant_locks(vnode);
		goto vnode_is_locked_u;
	}

vnode_is_locked_u:
	spin_unlock(&vnode->lock);
vnode_is_locked:
	/* the lock has been granted by the server... */
	ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);

skip_server_lock:
	/* ... but the VFS still needs to distribute access on this client. */
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
	ret = locks_lock_file_wait(file, fl);
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
	if (ret < 0)
		goto vfs_rejected_lock;

	/* Again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere).
	 */
	afs_validate(vnode, key);
	_leave(" = 0");
	return 0;

lock_is_contended:
	if (!(fl->fl_flags & FL_SLEEP)) {
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		ret = -EAGAIN;
		goto error_unlock;
	}

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
	trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
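	/* Also poll every five seconds in case the callback break never
	 * arrives - the conflicting lock may simply expire on the server
	 * rather than being released.
	 */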
	queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);

need_to_wait:
	/* We're going to have to wait. Either this client doesn't have a lock
	 * on the server yet and we need to wait for a callback to occur, or
	 * the client does have a lock on the server, but it's shared and we
	 * need an exclusive lock.
	 */
	spin_unlock(&vnode->lock);

	trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state != AFS_LOCK_PENDING);
	trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);

	if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
		spin_lock(&vnode->lock);

		switch (fl->fl_u.afs.state) {
		case AFS_LOCK_YOUR_TRY:
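			/* afs_next_locker() picked us to have a go at getting
			 * the lock from the server.
			 */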
			fl->fl_u.afs.state = AFS_LOCK_PENDING;
			goto try_to_lock;
		case AFS_LOCK_PENDING:
			if (ret > 0) {
				/* We need to retry the lock. We may not be
				 * notified by the server if it just expired
				 * rather than being released.
				 */
				ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
				afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
				fl->fl_u.afs.state = AFS_LOCK_PENDING;
				goto try_to_lock;
			}
			goto error_unlock;
		case AFS_LOCK_GRANTED:
		default:
			break;
		}

		spin_unlock(&vnode->lock);
	}

	if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
		goto vnode_is_locked;
	ret = fl->fl_u.afs.state;
	goto error;

vfs_rejected_lock:
	/* The VFS rejected the lock we just obtained, so we have to discard
	 * what we just got. We defer this to the lock manager work item to
	 * deal with.
	 */
	_debug("vfs refused %d", ret);
	if (no_server_lock)
		goto error;
	spin_lock(&vnode->lock);
	list_del_init(&fl->fl_u.afs.link);
	afs_defer_unlock(vnode);

error_unlock:
	spin_unlock(&vnode->lock);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * unlock on a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	int ret;

	_enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);

	/* Flush all pending writes before doing anything with locks. */
	vfs_fsync(file, 0);

	ret = locks_lock_file_wait(file, fl);
	_leave(" = %d [%u]", ret, vnode->lock_state);
	return ret;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	struct key *key = afs_file_key(file);
	int ret, lock_count;

	_enter("");

	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		return -ENOENT;

	fl->fl_type = F_UNLCK;

	/* check local lock records first */
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_fetch_status(vnode, key, false, NULL);
		if (ret < 0)
			goto error;

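		/* The lock count the server returns is positive when read
		 * locks are held and negative when the file is write-locked.
		 */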
		lock_count = READ_ONCE(vnode->status.lock_count);
		if (lock_count != 0) {
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
			fl->fl_pid = 0;
		}
	}

	ret = 0;
error:
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}

/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_lock);

	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0: op = afs_flock_op_return_ok; break;
	case -EAGAIN: op = afs_flock_op_return_eagain; break;
	case -EDEADLK: op = afs_flock_op_return_edeadlk; break;
	default: op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over AFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_flock);

	/* we're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0: op = afs_flock_op_return_ok; break;
	case -EAGAIN: op = afs_flock_op_return_eagain; break;
	case -EDEADLK: op = afs_flock_op_return_edeadlk; break;
	default: op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);

	spin_lock(&vnode->lock);
	trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
	spin_unlock(&vnode->lock);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	spin_lock(&vnode->lock);

	trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode);

	_debug("state %u for %p", vnode->lock_state, vnode);
	spin_unlock(&vnode->lock);
}