Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2)  * Copyright (C) 2011 Red Hat, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * This file is released under the GPL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include "dm-block-manager.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include "dm-persistent-data-internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/dm-bufio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/crc32c.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/rwsem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/device-mapper.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/stacktrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #define DM_MSG_PREFIX "block manager"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  * This is a read/write semaphore with a couple of differences.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * i) There is a restriction on the number of concurrent read locks that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * may be held at once.  This is just an implementation detail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * ii) Recursive locking attempts are detected and return EINVAL.  A stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * trace is also emitted for the previous lock acquisition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  * iii) Priority is given to write locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #define MAX_HOLDERS 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #define MAX_STACK 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
/*
 * Snapshot of the call stack captured when a lock was acquired, used by
 * __check_holder() to report where a recursive acquisition's original
 * hold came from.
 */
struct stack_store {
	unsigned int	nr_entries;	/* number of valid slots in entries[] */
	unsigned long	entries[MAX_STACK];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
/*
 * Debug read/write lock for a single block.  count follows the classic
 * rwsem convention: 0 = unlocked, -1 = write locked, >0 = number of
 * concurrent readers (capped at MAX_HOLDERS).
 */
struct block_lock {
	spinlock_t lock;		/* protects every field below */
	__s32 count;			/* -1 writer, 0 free, >0 readers */
	struct list_head waiters;	/* queued struct waiter entries */
	struct task_struct *holders[MAX_HOLDERS];	/* tasks currently holding us */

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	/* acquisition stack trace per holder, indexed like holders[] */
	struct stack_store traces[MAX_HOLDERS];
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
/*
 * A task queued on a block_lock.  The struct lives on the sleeping
 * task's stack; __wake_waiter() clearing @task is the grant signal
 * that __wait() polls for.
 */
struct waiter {
	struct list_head list;		/* entry on block_lock::waiters */
	struct task_struct *task;	/* waiting task; NULL once granted */
	int wants_write;		/* non-zero when waiting for write lock */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) static unsigned __find_holder(struct block_lock *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 			      struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	for (i = 0; i < MAX_HOLDERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 		if (lock->holders[i] == task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	BUG_ON(i == MAX_HOLDERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
/* call this *after* you increment lock->count */
static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
	/* find a free slot; BUGs (via __find_holder) if all are taken */
	unsigned h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_store *t;
#endif

	/* pin the task_struct; the reference is dropped in __del_holder() */
	get_task_struct(task);
	lock->holders[h] = task;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	/* record where this acquisition happened, skipping 2 inner frames */
	t = lock->traces + h;
	t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2);
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
/* call this *before* you decrement lock->count */
static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
	/* BUGs (via __find_holder) if @task isn't actually a holder */
	unsigned h = __find_holder(lock, task);
	lock->holders[h] = NULL;
	/* drop the reference taken by __add_holder() */
	put_task_struct(task);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
/*
 * Returns -EINVAL if current already holds this lock (a recursive
 * acquisition attempt), 0 otherwise.  Called with lock->lock held.
 */
static int __check_holder(struct block_lock *lock)
{
	unsigned i;

	for (i = 0; i < MAX_HOLDERS; i++) {
		if (lock->holders[i] == current) {
			DMERR("recursive lock detected in metadata");
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			/* where the original hold was taken (saved by __add_holder) */
			DMERR("previously held here:");
			stack_trace_print(lock->traces[i].entries,
					  lock->traces[i].nr_entries, 4);

			/* and where the offending re-acquisition comes from */
			DMERR("subsequent acquisition attempted here:");
			dump_stack();
#endif
			return -EINVAL;
		}
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
/*
 * Sleep until the lock has been granted to @w.  __wake_waiter()
 * signals the grant by clearing w->task and then waking the task.
 * Setting the task state *before* testing w->task means a wakeup that
 * arrives between the test and schedule() is not lost.
 */
static void __wait(struct waiter *w)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!w->task)	/* granted; waiter already unlinked by waker */
			break;

		schedule();
	}

	set_current_state(TASK_RUNNING);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
/*
 * Grant the lock to @w and wake its task.  Called with lock->lock held.
 * The waiter lives on the sleeping task's stack: once w->task is seen
 * as NULL that task may return from __wait() and the struct vanishes,
 * so the unlink and the load of w->task must be ordered before the
 * store that clears it — hence the smp_mb().
 */
static void __wake_waiter(struct waiter *w)
{
	struct task_struct *task;

	list_del(&w->list);
	task = w->task;
	smp_mb();	/* complete unlink/load before releasing the waiter */
	w->task = NULL;
	wake_up_process(task);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
/*
 * We either wake a few readers or a single writer.
 */
static void __wake_many(struct block_lock *lock)
{
	struct waiter *w, *tmp;

	/* must not be write locked when handing the lock onwards */
	BUG_ON(lock->count < 0);
	list_for_each_entry_safe(w, tmp, &lock->waiters, list) {
		if (lock->count >= MAX_HOLDERS)
			return;		/* no holder slots left */

		if (w->wants_write) {
			if (lock->count > 0)
				return; /* still read locked */

			/* grant exclusive ownership; nobody else gets woken */
			lock->count = -1;
			__add_holder(lock, w->task);
			__wake_waiter(w);
			return;
		}

		/* grant another shared reader and keep scanning the queue */
		lock->count++;
		__add_holder(lock, w->task);
		__wake_waiter(w);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) static void bl_init(struct block_lock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	spin_lock_init(&lock->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	lock->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	INIT_LIST_HEAD(&lock->waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	for (i = 0; i < MAX_HOLDERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 		lock->holders[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) static int __available_for_read(struct block_lock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	return lock->count >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 		lock->count < MAX_HOLDERS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		list_empty(&lock->waiters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
/*
 * Acquire a read lock, sleeping if necessary.  Returns 0 on success or
 * -EINVAL if the current task already holds this lock.
 */
static int bl_down_read(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	/* fast path: no writer, slot free, nobody queued ahead of us */
	if (__available_for_read(lock)) {
		lock->count++;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	/* keep our task_struct alive while the waker may reference it */
	get_task_struct(current);

	/* slow path: queue a stack-resident waiter and sleep */
	w.task = current;
	w.wants_write = 0;
	list_add_tail(&w.list, &lock->waiters);
	spin_unlock(&lock->lock);

	/* __wake_many() adds us as holder before waking us */
	__wait(&w);
	put_task_struct(current);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) static int bl_down_read_nonblock(struct block_lock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	spin_lock(&lock->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	r = __check_holder(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	if (__available_for_read(lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 		lock->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 		__add_holder(lock, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 		r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 		r = -EWOULDBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	spin_unlock(&lock->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
/* Release a read lock and wake any waiters that can now proceed. */
static void bl_up_read(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	BUG_ON(lock->count <= 0);	/* must currently be read locked */
	__del_holder(lock, current);
	--lock->count;
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 
/*
 * Acquire the write lock, sleeping if necessary.  Returns 0 on success
 * or -EINVAL if the current task already holds this lock.
 */
static int bl_down_write(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	/* fast path: lock completely idle, take it exclusively */
	if (lock->count == 0 && list_empty(&lock->waiters)) {
		lock->count = -1;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	/* keep our task_struct alive while the waker may reference it */
	get_task_struct(current);
	w.task = current;
	w.wants_write = 1;

	/*
	 * Writers given priority. We know there's only one mutator in the
	 * system, so ignoring the ordering reversal.
	 */
	list_add(&w.list, &lock->waiters);	/* head of queue, before readers */
	spin_unlock(&lock->lock);

	/* __wake_many() adds us as holder before waking us */
	__wait(&w);
	put_task_struct(current);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 
/* Release the write lock and wake whichever waiters can now run. */
static void bl_up_write(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	__del_holder(lock, current);
	lock->count = 0;	/* back to unlocked */
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) static void report_recursive_bug(dm_block_t b, int r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	if (r == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 		DMERR("recursive acquisition of block %llu requested.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 		      (unsigned long long) b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 
#else  /* !CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */

/*
 * Lock debugging disabled: every bl_* helper compiles away to a no-op
 * that always succeeds.
 */
#define bl_init(x) do { } while (0)
#define bl_down_read(x) 0
#define bl_down_read_nonblock(x) 0
#define bl_up_read(x) do { } while (0)
#define bl_down_write(x) 0
#define bl_up_write(x) do { } while (0)
#define report_recursive_bug(x, y) do { } while (0)

#endif /* CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)  * Block manager is currently implemented using dm-bufio.  struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)  * dm_block_manager and struct dm_block map directly onto a couple of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)  * structs in the bufio interface.  I want to retain the freedom to move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)  * away from bufio in the future.  So these structs are just cast within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)  * this .c file, rather than making it through to the public interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)  */
static struct dm_buffer *to_buffer(struct dm_block *b)
{
	/* struct dm_block is an opaque alias for dm-bufio's dm_buffer */
	return (struct dm_buffer *) b;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
/* Return the device block number that @b maps to. */
dm_block_t dm_block_location(struct dm_block *b)
{
	return dm_bufio_get_block_number(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_location);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
/* Return a pointer to the in-memory data of block @b. */
void *dm_block_data(struct dm_block *b)
{
	return dm_bufio_get_block_data(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
/*
 * Per-buffer auxiliary state; dm-bufio reserves room for this with each
 * buffer (sizeof(struct buffer_aux) is passed to dm_bufio_client_create).
 */
struct buffer_aux {
	struct dm_block_validator *validator;	/* set on first validated lock */
	int write_locked;	/* non-zero while held for write */

#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
	struct block_lock lock;	/* debug rw lock for this block */
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 
/*
 * dm-bufio callback invoked for a freshly allocated buffer: start with
 * no validator and an initialised (unlocked) debug lock.
 */
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
	aux->validator = NULL;
	bl_init(&aux->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
/*
 * dm-bufio callback invoked before a buffer is written back: let the
 * block's validator (if any) prepare the raw data for writing.
 */
static void dm_block_manager_write_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
	if (aux->validator) {
		aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
			 dm_bufio_get_block_size(dm_bufio_get_client(buf)));
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) /*----------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)  * Public interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)  *--------------------------------------------------------------*/
/* The block manager proper: a thin wrapper around a dm-bufio client. */
struct dm_block_manager {
	struct dm_bufio_client *bufio;
	bool read_only:1;	/* when set, write locks fail with -EPERM */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 						 unsigned block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 						 unsigned max_held_per_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	struct dm_block_manager *bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	bm = kmalloc(sizeof(*bm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	if (!bm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 		r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 	bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 					   sizeof(struct buffer_aux),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 					   dm_block_manager_alloc_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 					   dm_block_manager_write_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	if (IS_ERR(bm->bufio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 		r = PTR_ERR(bm->bufio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 		kfree(bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	bm->read_only = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	return bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	return ERR_PTR(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) EXPORT_SYMBOL_GPL(dm_block_manager_create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 
/* Tear down a block manager created by dm_block_manager_create(). */
void dm_block_manager_destroy(struct dm_block_manager *bm)
{
	dm_bufio_client_destroy(bm->bufio);
	kfree(bm);
}
EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
/* Block size in bytes, as configured at creation time. */
unsigned dm_bm_block_size(struct dm_block_manager *bm)
{
	return dm_bufio_get_block_size(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 
/* Size of the underlying device in blocks, as reported by dm-bufio. */
dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm)
{
	return dm_bufio_get_device_size(bm->bufio);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 
/*
 * Run validator @v over a freshly locked buffer and remember it on
 * success.  A buffer is checked at most once: later locks only require
 * that the same (possibly NULL) validator is supplied.
 */
static int dm_bm_validate_buffer(struct dm_block_manager *bm,
				 struct dm_buffer *buf,
				 struct buffer_aux *aux,
				 struct dm_block_validator *v)
{
	if (unlikely(!aux->validator)) {
		int r;
		if (!v)
			return 0;	/* caller doesn't want validation */
		r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
		if (unlikely(r)) {
			DMERR_LIMIT("%s validator check failed for block %llu", v->name,
				    (unsigned long long) dm_bufio_get_block_number(buf));
			return r;
		}
		/* cache the validator so later locks can skip the check */
		aux->validator = v;
	} else {
		/* mixing validators on one block is a caller bug */
		if (unlikely(aux->validator != v)) {
			DMERR_LIMIT("validator mismatch (old=%s vs new=%s) for block %llu",
				    aux->validator->name, v ? v->name : "NULL",
				    (unsigned long long) dm_bufio_get_block_number(buf));
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Read block @b from disk (via dm-bufio), take its read lock and run
 * validator @v over it.  On success *result holds the locked block,
 * which the caller must later unlock/release.  Returns a negative
 * errno on I/O, locking (recursive attempt) or validation failure.
 */
int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (IS_ERR(p))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read(&aux->lock);
	if (unlikely(r)) {
		/* recursive acquisition: drop the buffer and report */
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		/* validation failed: undo the lock and the buffer reference */
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_read_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) int dm_bm_write_lock(struct dm_block_manager *bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 		     dm_block_t b, struct dm_block_validator *v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 		     struct dm_block **result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	struct buffer_aux *aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	if (dm_bm_is_read_only(bm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	if (IS_ERR(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		return PTR_ERR(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	aux = dm_bufio_get_aux_data(to_buffer(*result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	r = bl_down_write(&aux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 		dm_bufio_release(to_buffer(*result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 		report_recursive_bug(b, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	aux->write_locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 		bl_up_write(&aux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 		dm_bufio_release(to_buffer(*result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) EXPORT_SYMBOL_GPL(dm_bm_write_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) int dm_bm_read_try_lock(struct dm_block_manager *bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 			dm_block_t b, struct dm_block_validator *v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 			struct dm_block **result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	struct buffer_aux *aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	if (IS_ERR(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 		return PTR_ERR(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	if (unlikely(!p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 		return -EWOULDBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	aux = dm_bufio_get_aux_data(to_buffer(*result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	r = bl_down_read_nonblock(&aux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 		dm_bufio_release(to_buffer(*result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 		report_recursive_bug(b, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	aux->write_locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	if (unlikely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 		bl_up_read(&aux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		dm_bufio_release(to_buffer(*result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) int dm_bm_write_lock_zero(struct dm_block_manager *bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 			  dm_block_t b, struct dm_block_validator *v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 			  struct dm_block **result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	struct buffer_aux *aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	if (dm_bm_is_read_only(bm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	if (IS_ERR(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		return PTR_ERR(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	memset(p, 0, dm_bm_block_size(bm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	aux = dm_bufio_get_aux_data(to_buffer(*result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 	r = bl_down_write(&aux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 		dm_bufio_release(to_buffer(*result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	aux->write_locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	aux->validator = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) void dm_bm_unlock(struct dm_block *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	struct buffer_aux *aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	aux = dm_bufio_get_aux_data(to_buffer(b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	if (aux->write_locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 		dm_bufio_mark_buffer_dirty(to_buffer(b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 		bl_up_write(&aux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 		bl_up_read(&aux->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	dm_bufio_release(to_buffer(b));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) EXPORT_SYMBOL_GPL(dm_bm_unlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) int dm_bm_flush(struct dm_block_manager *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	if (dm_bm_is_read_only(bm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	return dm_bufio_write_dirty_buffers(bm->bufio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) EXPORT_SYMBOL_GPL(dm_bm_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	dm_bufio_prefetch(bm->bufio, b, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) bool dm_bm_is_read_only(struct dm_block_manager *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	return (bm ? bm->read_only : true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) void dm_bm_set_read_only(struct dm_block_manager *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	if (bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 		bm->read_only = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) void dm_bm_set_read_write(struct dm_block_manager *bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	if (bm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 		bm->read_only = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	return crc32c(~(u32) 0, data, len) ^ init_xor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) EXPORT_SYMBOL_GPL(dm_bm_checksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) /*----------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) MODULE_DESCRIPTION("Immutable metadata library for dm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) /*----------------------------------------------------------------*/