Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

fs/reiserfs/lock.c:

// SPDX-License-Identifier: GPL-2.0
#include "reiserfs.h"
#include <linux/mutex.h>

/*
 * The previous reiserfs locking scheme was heavily based on
 * the tricky properties of the BKL (Big Kernel Lock):
 *
 * - it could be acquired recursively by the same task
 * - performance relied on its release-while-schedule() property
 *
 * Now that the BKL is replaced by a mutex, we still want to keep the
 * recursive property to avoid big changes in the code structure.
 * We use our own lock_owner here because the owner field of a mutex
 * is only available with SMP or mutex debugging, and we only need that
 * information for this one lock; there is no need for a system-wide
 * recursive mutex facility.
 *
 * Also, this lock is often released before a call that could block,
 * because reiserfs performance partially relied on the
 * release-while-schedule() property of the BKL.
 * A short usage sketch follows reiserfs_write_unlock() below.
 */
void reiserfs_write_lock(struct super_block *s)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

	if (sb_i->lock_owner != current) {
		mutex_lock(&sb_i->lock);
		sb_i->lock_owner = current;
	}

	/* No need to protect it, only the current task touches it */
	sb_i->lock_depth++;
}

void reiserfs_write_unlock(struct super_block *s)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

	/*
	 * Are we unlocking without even holding the lock?
	 * Such a situation must raise a BUG() if we don't want
	 * to corrupt the data.
	 */
	BUG_ON(sb_i->lock_owner != current);

	if (--sb_i->lock_depth == -1) {
		sb_i->lock_owner = NULL;
		mutex_unlock(&sb_i->lock);
	}
}
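
/*
 * A minimal usage sketch (the function name is hypothetical) of the
 * recursive write lock above: a second reiserfs_write_lock() from the
 * owning task only bumps lock_depth, and the mutex is released once the
 * depth drops back to -1 in reiserfs_write_unlock().
 */
static void example_recursive_use(struct super_block *s)
{
	reiserfs_write_lock(s);		/* takes the mutex, depth: -1 -> 0 */
	reiserfs_write_lock(s);		/* same owner, depth: 0 -> 1 */

	/* ... work that assumes the write lock is held ... */

	reiserfs_write_unlock(s);	/* depth: 1 -> 0, mutex still held */
	reiserfs_write_unlock(s);	/* depth: 0 -> -1, mutex released */
}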

int __must_check reiserfs_write_unlock_nested(struct super_block *s)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
	int depth;

	/* this can happen when the lock isn't always held */
	if (sb_i->lock_owner != current)
		return -1;

	depth = sb_i->lock_depth;

	sb_i->lock_depth = -1;
	sb_i->lock_owner = NULL;
	mutex_unlock(&sb_i->lock);

	return depth;
}

void reiserfs_write_lock_nested(struct super_block *s, int depth)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

	/* this can happen when the lock isn't always held */
	if (depth == -1)
		return;

	mutex_lock(&sb_i->lock);
	sb_i->lock_owner = current;
	sb_i->lock_depth = depth;
}
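
/*
 * A minimal sketch (the function name is hypothetical) of the
 * save/restore pattern the *_nested() helpers support: drop the write
 * lock across a call that may sleep, then reacquire it at the same
 * recursion depth.  cond_resched() stands in for any blocking call.
 * The pattern also works when the lock is not held, since a depth of -1
 * round-trips through both helpers as a no-op.
 */
static void example_blocking_step(struct super_block *s)
{
	int depth;

	/* Release the lock, remembering the current recursion depth */
	depth = reiserfs_write_unlock_nested(s);

	cond_resched();

	/* Take the lock back at the exact depth we had before */
	reiserfs_write_lock_nested(s, depth);
}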

/*
 * Utility function to check that the superblock write lock is held at
 * the call site.  It warns (WARN_ON) if the lock depth shows the lock
 * is not held.  caller names the calling function and is currently
 * unused by the check itself.
 */
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);

	WARN_ON(sb_i->lock_depth < 0);
}

#ifdef CONFIG_REISERFS_CHECK
void reiserfs_lock_check_recursive(struct super_block *sb)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);

	WARN_ONCE((sb_i->lock_depth > 0), "Unwanted recursive reiserfs lock!\n");
}
#endif
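
/*
 * A minimal sketch (the function name is hypothetical) of where the
 * checks above might sit: reiserfs_check_lock_depth() warns if the
 * write lock is not held at a point that requires it, and
 * reiserfs_lock_check_recursive() warns if it is held recursively right
 * before code that cannot tolerate that.
 */
static void example_checked_op(struct super_block *s)
{
	/* Warn if the caller forgot to take the write lock */
	reiserfs_check_lock_depth(s, "example_checked_op");

#ifdef CONFIG_REISERFS_CHECK
	/* Warn if the lock is currently held more than once */
	reiserfs_lock_check_recursive(s);
#endif

	/* ... operation that expects a single-depth write lock ... */
}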