Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

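The file below is drivers/md/dm-log-userspace-base.c, the kernel-side client of the device-mapper userspace dirty log.
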
/*
 * Copyright (C) 2006-2009 Red Hat, Inc.
 *
 * This file is released under the LGPL.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/dm-dirty-log.h>
#include <linux/device-mapper.h>
#include <linux/dm-log-userspace.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "dm-log-userspace-transfer.h"

#define DM_LOG_USERSPACE_VSN "1.3.0"

#define FLUSH_ENTRY_POOL_SIZE 16

struct dm_dirty_log_flush_entry {
	int type;
	region_t region;
	struct list_head list;
};

/*
 * This limit on the number of mark and clear requests is, to a degree,
 * arbitrary.  However, there is some basis for the choice in the limits
 * imposed on the size of data payload by dm-log-userspace-transfer.c:
 * dm_consult_userspace().
 */
#define MAX_FLUSH_GROUP_COUNT 32
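/*
 * A worked example of that limit (treating the exact transfer-layer
 * bound as an assumption rather than a quote from
 * dm-log-userspace-transfer.c): a full group is
 * MAX_FLUSH_GROUP_COUNT * sizeof(uint64_t) = 32 * 8 = 256 bytes of
 * payload per request, small enough to fit in a single message to
 * userspace.
 */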

struct log_c {
	struct dm_target *ti;
	struct dm_dev *log_dev;

	char *usr_argv_str;
	uint32_t usr_argc;

	uint32_t region_size;
	region_t region_count;
	uint64_t luid;
	char uuid[DM_UUID_LEN];

	/*
	 * Mark and clear requests are held until a flush is issued
	 * so that we can group, and thereby limit, the amount of
	 * network traffic between kernel and userspace.  The 'flush_lock'
	 * is used to protect these lists.
	 */
	spinlock_t flush_lock;
	struct list_head mark_list;
	struct list_head clear_list;

	/*
	 * in_sync_hint gets set when doing is_remote_recovering.  It
	 * represents the first region that needs recovery.  IOW, the
	 * first zero bit of sync_bits.  This can be useful to limit
	 * traffic for calls like is_remote_recovering and get_resync_work,
	 * but take care in its use for anything else.
	 */
	uint64_t in_sync_hint;

	/*
	 * Workqueue for flush of clear region requests.
	 */
	struct workqueue_struct *dmlog_wq;
	struct delayed_work flush_log_work;
	atomic_t sched_flush;

	/*
	 * Combine userspace flush and mark requests for efficiency.
	 */
	uint32_t integrated_flush;

	mempool_t flush_entry_pool;
};

static struct kmem_cache *_flush_entry_cache;

static int userspace_do_request(struct log_c *lc, const char *uuid,
				int request_type, char *data, size_t data_size,
				char *rdata, size_t *rdata_size)
{
	int r;

	/*
	 * If the server isn't there, -ESRCH is returned,
	 * and we must keep trying until the server is
	 * restored.
	 */
retry:
	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
				 data_size, rdata, rdata_size);

	if (r != -ESRCH)
		return r;

	DMERR("Userspace log server not found.");
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(2*HZ);
		DMWARN("Attempting to contact userspace log server...");
		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
					 lc->usr_argv_str,
					 strlen(lc->usr_argv_str) + 1,
					 NULL, NULL);
		if (!r)
			break;
	}
	DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
	r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
				 0, NULL, NULL);
	if (!r)
		goto retry;

	DMERR("Error trying to resume userspace log: %d", r);

	return -ESRCH;
}

static int build_constructor_string(struct dm_target *ti,
				    unsigned argc, char **argv,
				    char **ctr_str)
{
	int i, str_size;
	char *str = NULL;

	*ctr_str = NULL;

	/*
	 * Determine overall size of the string.
	 */
	for (i = 0, str_size = 0; i < argc; i++)
		str_size += strlen(argv[i]) + 1; /* +1 for space between args */

	str_size += 20; /* Max number of chars in a printed u64 number */

	str = kzalloc(str_size, GFP_KERNEL);
	if (!str) {
		DMWARN("Unable to allocate memory for constructor string");
		return -ENOMEM;
	}

	str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
	for (i = 0; i < argc; i++)
		str_size += sprintf(str + str_size, " %s", argv[i]);

	*ctr_str = str;
	return str_size;
}
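/*
 * For illustration (values made up): with ti->len == 2097152 and
 * argv == { "clustered-disk", "2", "253:4", "1024" }, the function
 * above builds the string
 *
 *	"2097152 clustered-disk 2 253:4 1024"
 *
 * and the returned str_size is the strlen() of that string.
 */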

static void do_flush(struct work_struct *work)
{
	int r;
	struct log_c *lc = container_of(work, struct log_c, flush_log_work.work);

	atomic_set(&lc->sched_flush, 0);

	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, NULL, 0, NULL, NULL);

	if (r)
		dm_table_event(lc->ti->table);
}

/*
 * userspace_ctr
 *
 * argv contains:
 *	<UUID> [integrated_flush] <other args>
 * Where 'other args' are the userspace implementation-specific log
 * arguments.
 *
 * Example:
 *	<UUID> [integrated_flush] clustered-disk <arg count> <log dev>
 *	<region_size> [[no]sync]
 *
 * This module strips off the <UUID> and uses it for identification
 * purposes when communicating with userspace about a log.
 *
 * If integrated_flush is defined, the kernel combines flush
 * and mark requests.
 *
 * The rest of the line, beginning with 'clustered-disk', is passed
 * to the userspace ctr function.
 */
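/*
 * A concrete instance (UUID and numbers invented for illustration):
 * given
 *
 *	argv = { "my-cluster-log-01", "integrated_flush",
 *		 "clustered-disk", "2", "253:4", "1024" }
 *
 * "my-cluster-log-01" becomes lc->uuid, integrated_flush is consumed
 * and noted, and "<ti->len> clustered-disk 2 253:4 1024" is sent to
 * the userspace ctr as the constructor string.
 */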
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
			 unsigned argc, char **argv)
{
	int r = 0;
	int str_size;
	char *ctr_str = NULL;
	struct log_c *lc = NULL;
	uint64_t rdata;
	size_t rdata_size = sizeof(rdata);
	char *devices_rdata = NULL;
	size_t devices_rdata_size = DM_NAME_LEN;

	if (argc < 3) {
		DMWARN("Too few arguments to userspace dirty log");
		return -EINVAL;
	}

	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
	if (!lc) {
		DMWARN("Unable to allocate userspace log context.");
		return -ENOMEM;
	}

	/* The ptr value is sufficient for local unique id */
	lc->luid = (unsigned long)lc;

	lc->ti = ti;

	if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
		DMWARN("UUID argument too long.");
		kfree(lc);
		return -EINVAL;
	}

	lc->usr_argc = argc;

	strncpy(lc->uuid, argv[0], DM_UUID_LEN);
	argc--;
	argv++;
	spin_lock_init(&lc->flush_lock);
	INIT_LIST_HEAD(&lc->mark_list);
	INIT_LIST_HEAD(&lc->clear_list);

	if (!strcasecmp(argv[0], "integrated_flush")) {
		lc->integrated_flush = 1;
		argc--;
		argv++;
	}

	str_size = build_constructor_string(ti, argc, argv, &ctr_str);
	if (str_size < 0) {
		kfree(lc);
		return str_size;
	}

	devices_rdata = kzalloc(devices_rdata_size, GFP_KERNEL);
	if (!devices_rdata) {
		DMERR("Failed to allocate memory for device information");
		r = -ENOMEM;
		goto out;
	}

	r = mempool_init_slab_pool(&lc->flush_entry_pool, FLUSH_ENTRY_POOL_SIZE,
				   _flush_entry_cache);
	if (r) {
		DMERR("Failed to create flush_entry_pool");
		goto out;
	}

	/*
	 * Send table string and get back any opened device.
	 */
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
				 ctr_str, str_size,
				 devices_rdata, &devices_rdata_size);

	if (r < 0) {
		if (r == -ESRCH)
			DMERR("Userspace log server not found");
		else
			DMERR("Userspace log server failed to create log");
		goto out;
	}

	/* Since the region size does not change, get it now */
	rdata_size = sizeof(rdata);
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
				 NULL, 0, (char *)&rdata, &rdata_size);

	if (r) {
		DMERR("Failed to get region size of dirty log");
		goto out;
	}

	lc->region_size = (uint32_t)rdata;
	lc->region_count = dm_sector_div_up(ti->len, lc->region_size);

	if (devices_rdata_size) {
		if (devices_rdata[devices_rdata_size - 1] != '\0') {
			DMERR("DM_ULOG_CTR device return string not properly terminated");
			r = -EINVAL;
			goto out;
		}
		r = dm_get_device(ti, devices_rdata,
				  dm_table_get_mode(ti->table), &lc->log_dev);
		if (r)
			DMERR("Failed to register %s with device-mapper",
			      devices_rdata);
	}

	if (lc->integrated_flush) {
		lc->dmlog_wq = alloc_workqueue("dmlogd", WQ_MEM_RECLAIM, 0);
		if (!lc->dmlog_wq) {
			DMERR("couldn't start dmlogd");
			r = -ENOMEM;
			goto out;
		}

		INIT_DELAYED_WORK(&lc->flush_log_work, do_flush);
		atomic_set(&lc->sched_flush, 0);
	}

out:
	kfree(devices_rdata);
	if (r) {
		mempool_exit(&lc->flush_entry_pool);
		kfree(lc);
		kfree(ctr_str);
	} else {
		lc->usr_argv_str = ctr_str;
		log->context = lc;
	}

	return r;
}

static void userspace_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	if (lc->integrated_flush) {
		/* flush workqueue */
		if (atomic_read(&lc->sched_flush))
			flush_delayed_work(&lc->flush_log_work);

		destroy_workqueue(lc->dmlog_wq);
	}

	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
				    NULL, 0, NULL, NULL);

	if (lc->log_dev)
		dm_put_device(lc->ti, lc->log_dev);

	mempool_exit(&lc->flush_entry_pool);

	kfree(lc->usr_argv_str);
	kfree(lc);

	return;
}

static int userspace_presuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
				 NULL, 0, NULL, NULL);

	return r;
}

static int userspace_postsuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	/*
	 * Run a scheduled flush now instead of waiting for its timer.
	 */
	if (lc->integrated_flush && atomic_read(&lc->sched_flush))
		flush_delayed_work(&lc->flush_log_work);

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
				 NULL, 0, NULL, NULL);

	return r;
}

static int userspace_resume(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	lc->in_sync_hint = 0;
	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
				 NULL, 0, NULL, NULL);

	return r;
}

static uint32_t userspace_get_region_size(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	return lc->region_size;
}

/*
 * userspace_is_clean
 *
 * Check whether a region is clean.  If there is any sort of
 * failure when consulting the server, we return not clean.
 *
 * Returns: 1 if clean, 0 otherwise
 */
static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
{
	int r;
	uint64_t region64 = (uint64_t)region;
	int64_t is_clean;
	size_t rdata_size;
	struct log_c *lc = log->context;

	rdata_size = sizeof(is_clean);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
				 (char *)&region64, sizeof(region64),
				 (char *)&is_clean, &rdata_size);

	return (r) ? 0 : (int)is_clean;
}

/*
 * userspace_in_sync
 *
 * Check if the region is in-sync.  If there is any sort
 * of failure when consulting the server, we assume that
 * the region is not in sync.
 *
 * If 'can_block' is not set, -EWOULDBLOCK is returned immediately.
 *
 * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK
 */
static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
			     int can_block)
{
	int r;
	uint64_t region64 = region;
	int64_t in_sync;
	size_t rdata_size;
	struct log_c *lc = log->context;

	/*
	 * We can never respond directly - even if in_sync_hint is
	 * set.  This is because another machine could see a device
	 * failure and mark the region out-of-sync.  If we don't go
	 * to userspace to ask, we might think the region is in-sync
	 * and allow a read to pick up data that is stale.  (This is
	 * very unlikely if a device actually fails; but it is very
	 * likely if a connection to one device from one machine fails.)
	 *
	 * There still might be a problem if the mirror caches the region
	 * state as in-sync... but then this call would not be made.  So,
	 * that is a mirror problem.
	 */
	if (!can_block)
		return -EWOULDBLOCK;

	rdata_size = sizeof(in_sync);
	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
				 (char *)&region64, sizeof(region64),
				 (char *)&in_sync, &rdata_size);
	return (r) ? 0 : (int)in_sync;
}

static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
{
	int r = 0;
	struct dm_dirty_log_flush_entry *fe;

	list_for_each_entry(fe, flush_list, list) {
		r = userspace_do_request(lc, lc->uuid, fe->type,
					 (char *)&fe->region,
					 sizeof(fe->region),
					 NULL, NULL);
		if (r)
			break;
	}

	return r;
}

static int flush_by_group(struct log_c *lc, struct list_head *flush_list,
			  int flush_with_payload)
{
	int r = 0;
	int count;
	uint32_t type = 0;
	struct dm_dirty_log_flush_entry *fe, *tmp_fe;
	LIST_HEAD(tmp_list);
	uint64_t group[MAX_FLUSH_GROUP_COUNT];

	/*
	 * Group process the requests
	 */
	while (!list_empty(flush_list)) {
		count = 0;

		list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
			group[count] = fe->region;
			count++;

			list_move(&fe->list, &tmp_list);

			type = fe->type;
			if (count >= MAX_FLUSH_GROUP_COUNT)
				break;
		}

		if (flush_with_payload) {
			r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
						 (char *)(group),
						 count * sizeof(uint64_t),
						 NULL, NULL);
			/*
			 * Integrated flush failed.
			 */
			if (r)
				break;
		} else {
			r = userspace_do_request(lc, lc->uuid, type,
						 (char *)(group),
						 count * sizeof(uint64_t),
						 NULL, NULL);
			if (r) {
				/*
				 * Group send failed.  Attempt one-by-one.
				 */
				list_splice_init(&tmp_list, flush_list);
				r = flush_one_by_one(lc, flush_list);
				break;
			}
		}
	}

	/*
	 * Must collect the flush entries that were successfully processed
	 * as a group so that they will be freed by the caller.
	 */
	list_splice_init(&tmp_list, flush_list);

	return r;
}
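/*
 * Example of the grouping above (numbers invented): 70 queued clear
 * requests reach userspace as three DM_ULOG_CLEAR_REGION payloads of
 * 32, 32 and 6 regions.  Only if a non-payload group send fails are
 * the entries retried one at a time via flush_one_by_one().
 */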

/*
 * userspace_flush
 *
 * This function is ok to block.
 * The flush happens in two stages.  First, it sends all
 * clear/mark requests that are on the list.  Then it
 * tells the server to commit them.  This gives the
 * server a chance to optimise the commit, instead of
 * doing it for every request.
 *
 * Additionally, we could implement another thread that
 * sends the requests up to the server - reducing the
 * load on flush.  Then the flush would have less in
 * the list and be responsible for the finishing commit.
 *
 * Returns: 0 on success, < 0 on failure
 */
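/*
 * A sketch of the resulting request sequence in the classic
 * (non-integrated-flush) case, assuming both lists have entries:
 *
 *	DM_ULOG_CLEAR_REGION	(groups of up to 32 regions)
 *	DM_ULOG_MARK_REGION	(groups of up to 32 regions)
 *	DM_ULOG_FLUSH		(no payload - the commit)
 *
 * With integrated_flush, the mark groups are instead sent as
 * DM_ULOG_FLUSH requests carrying the regions as payload, so the
 * commit rides along with the marks.
 */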
static int userspace_flush(struct dm_dirty_log *log)
{
	int r = 0;
	unsigned long flags;
	struct log_c *lc = log->context;
	LIST_HEAD(mark_list);
	LIST_HEAD(clear_list);
	int mark_list_is_empty;
	int clear_list_is_empty;
	struct dm_dirty_log_flush_entry *fe, *tmp_fe;
	mempool_t *flush_entry_pool = &lc->flush_entry_pool;

	spin_lock_irqsave(&lc->flush_lock, flags);
	list_splice_init(&lc->mark_list, &mark_list);
	list_splice_init(&lc->clear_list, &clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	mark_list_is_empty = list_empty(&mark_list);
	clear_list_is_empty = list_empty(&clear_list);

	if (mark_list_is_empty && clear_list_is_empty)
		return 0;

	r = flush_by_group(lc, &clear_list, 0);
	if (r)
		goto out;

	if (!lc->integrated_flush) {
		r = flush_by_group(lc, &mark_list, 0);
		if (r)
			goto out;
		r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
					 NULL, 0, NULL, NULL);
		goto out;
	}

	/*
	 * Send integrated flush request with mark_list as payload.
	 */
	r = flush_by_group(lc, &mark_list, 1);
	if (r)
		goto out;

	if (mark_list_is_empty && !atomic_read(&lc->sched_flush)) {
		/*
		 * When there are only clear region requests,
		 * we schedule a flush in the future.
		 */
		queue_delayed_work(lc->dmlog_wq, &lc->flush_log_work, 3 * HZ);
		atomic_set(&lc->sched_flush, 1);
	} else {
		/*
		 * Cancel pending flush because we
		 * have already flushed in mark_region.
		 */
		cancel_delayed_work(&lc->flush_log_work);
		atomic_set(&lc->sched_flush, 0);
	}

out:
	/*
	 * We can safely remove these entries, even after failure.
	 * Calling code will receive an error and will know that
	 * the log facility has failed.
	 */
	list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
		list_del(&fe->list);
		mempool_free(fe, flush_entry_pool);
	}
	list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
		list_del(&fe->list);
		mempool_free(fe, flush_entry_pool);
	}

	if (r)
		dm_table_event(lc->ti->table);

	return r;
}

/*
 * userspace_mark_region
 *
 * This function should avoid blocking unless absolutely required.
 * (Memory allocation is valid for blocking.)
 */
static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct dm_dirty_log_flush_entry *fe;

	/* Wait for an allocation, but _never_ fail */
	fe = mempool_alloc(&lc->flush_entry_pool, GFP_NOIO);
	BUG_ON(!fe);

	spin_lock_irqsave(&lc->flush_lock, flags);
	fe->type = DM_ULOG_MARK_REGION;
	fe->region = region;
	list_add(&fe->list, &lc->mark_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	return;
}

/*
 * userspace_clear_region
 *
 * This function must not block.
 * So, the alloc can't block.  In the worst case, it is ok to
 * fail.  That would simply mean we can't clear the region.
 * Failure does nothing to the current sync context, but it does
 * mean the region will be re-synced on a reload of the mirror
 * even though it is in-sync.
 */
static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
{
	unsigned long flags;
	struct log_c *lc = log->context;
	struct dm_dirty_log_flush_entry *fe;

	/*
	 * If we fail to allocate, we skip the clearing of
	 * the region.  This doesn't hurt us in any way, except
	 * to cause the region to be resync'ed when the
	 * device is activated next time.
	 */
	fe = mempool_alloc(&lc->flush_entry_pool, GFP_ATOMIC);
	if (!fe) {
		DMERR("Failed to allocate memory to clear region.");
		return;
	}

	spin_lock_irqsave(&lc->flush_lock, flags);
	fe->type = DM_ULOG_CLEAR_REGION;
	fe->region = region;
	list_add(&fe->list, &lc->clear_list);
	spin_unlock_irqrestore(&lc->flush_lock, flags);

	return;
}

/*
 * userspace_get_resync_work
 *
 * Get a region that needs recovery.  It is valid to return
 * an error for this function.
 *
 * Returns: 1 if region filled, 0 if no work, <0 on error
 */
static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
	int r;
	size_t rdata_size;
	struct log_c *lc = log->context;
	struct {
		int64_t i; /* 64-bit for mixed-architecture compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 		region_t r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	} pkg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	if (lc->in_sync_hint >= lc->region_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	rdata_size = sizeof(pkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 				 NULL, 0, (char *)&pkg, &rdata_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 	*region = pkg.r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	return (r) ? r : (int)pkg.i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)  * userspace_set_region_sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)  * Set the sync status of a given region.  This function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)  * must not fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) static void userspace_set_region_sync(struct dm_dirty_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 				      region_t region, int in_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	struct log_c *lc = log->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 		region_t r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 		int64_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	} pkg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	pkg.r = region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	pkg.i = (int64_t)in_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	(void) userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 				    (char *)&pkg, sizeof(pkg), NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	 * It would be nice to be able to report failures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 	 * However, it is easy enough to detect and resolve.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)  * userspace_get_sync_count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)  * If there is any sort of failure when consulting the server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)  * we assume that the sync count is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)  * Returns: sync count on success, 0 on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) static region_t userspace_get_sync_count(struct dm_dirty_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	size_t rdata_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 	uint64_t sync_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	struct log_c *lc = log->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	rdata_size = sizeof(sync_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 				 NULL, 0, (char *)&sync_count, &rdata_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	if (sync_count >= lc->region_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 		lc->in_sync_hint = lc->region_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 	return (region_t)sync_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)  * userspace_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)  * Returns: amount of space consumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 			    char *result, unsigned maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	char *table_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	size_t sz = (size_t)maxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	struct log_c *lc = log->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	switch (status_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	case STATUSTYPE_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 		r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 					 NULL, 0, result, &sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 			sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 			DMEMIT("%s 1 COM_FAILURE", log->type->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 	case STATUSTYPE_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 		sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 		table_args = strchr(lc->usr_argv_str, ' ');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 		BUG_ON(!table_args); /* There will always be a ' ' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 		table_args++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 		DMEMIT("%s %u %s ", log->type->name, lc->usr_argc, lc->uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 		if (lc->integrated_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 			DMEMIT("integrated_flush ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 		DMEMIT("%s ", table_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	return (r) ? 0 : (int)sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
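
For STATUSTYPE_TABLE the branch above replays the constructor line so that
"dmsetup table" can round-trip the device.  With hypothetical placeholder
arguments the emitted fragment would look roughly like
"userspace 3 <uuid> integrated_flush <daemon args>": the type name, the saved
argument count, the UUID, the optional integrated_flush flag, then everything
after the first space of the saved constructor string.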
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)  * userspace_is_remote_recovering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)  * Returns: 1 if the region is (or may be) recovering remotely, 0 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) static int userspace_is_remote_recovering(struct dm_dirty_log *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 					  region_t region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 	uint64_t region64 = region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 	struct log_c *lc = log->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 	static unsigned long limit;	/* jiffies: end of the current rate-limit window */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 		int64_t is_recovering;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 		uint64_t in_sync_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	} pkg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	size_t rdata_size = sizeof(pkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 	 * Once the mirror has been reported to be in-sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	 * it will never again ask for recovery work.  So,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 	 * we can safely say there is not a remote machine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 	 * recovering if the device is in-sync.  (in_sync_hint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	 * must be reset at resume time.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	if (region < lc->in_sync_hint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 	else if (time_after(limit, jiffies))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	limit = jiffies + (HZ / 4);	/* re-query userspace at most every 250 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 				 (char *)&region64, sizeof(region64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 				 (char *)&pkg, &rdata_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 	if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 		return 1;	/* failure: pessimistically assume recovery is in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	lc->in_sync_hint = pkg.in_sync_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	return (int)pkg.is_recovering;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
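
The 250 ms window above is a standard jiffies throttle: while the window is
open, the function pessimistically reports "recovering" rather than paying for
another kernel/userspace round trip.  Distilled into a stand-alone sketch
(only jiffies, HZ and time_after() are real kernel interfaces here):

#include <linux/types.h>
#include <linux/jiffies.h>

/* Sketch: return true while inside the current rate-limit window,
 * otherwise open a new 250 ms window and let the caller do real work. */
static bool query_throttled(void)
{
	static unsigned long limit;	/* jiffies value ending the window */

	if (time_after(limit, jiffies))
		return true;		/* still inside the window */

	limit = jiffies + (HZ / 4);	/* next query no sooner than 250 ms */
	return false;
}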
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static struct dm_dirty_log_type _userspace_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 	.name = "userspace",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 	.module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 	.ctr = userspace_ctr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 	.dtr = userspace_dtr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 	.presuspend = userspace_presuspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 	.postsuspend = userspace_postsuspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 	.resume = userspace_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 	.get_region_size = userspace_get_region_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 	.is_clean = userspace_is_clean,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 	.in_sync = userspace_in_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 	.flush = userspace_flush,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 	.mark_region = userspace_mark_region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 	.clear_region = userspace_clear_region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 	.get_resync_work = userspace_get_resync_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 	.set_region_sync = userspace_set_region_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 	.get_sync_count = userspace_get_sync_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 	.status = userspace_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 	.is_remote_recovering = userspace_is_remote_recovering,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) };
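
Once registered, the log is reachable by name through the dm-dirty-log core.
Below is a minimal sketch of a consumer (dm-raid1 does essentially this) using
the real dm_dirty_log_create()/dm_dirty_log_destroy() API from
<linux/dm-dirty-log.h>; the target, flush callback and argument vector are
placeholders:

#include <linux/dm-dirty-log.h>

/* Hypothetical consumer: instantiate the "userspace" log for a target.
 * argv would carry the uuid plus any daemon-specific arguments. */
static struct dm_dirty_log *attach_userspace_log(struct dm_target *ti,
						 int (*flush_cb)(struct dm_target *ti),
						 unsigned argc, char **argv)
{
	struct dm_dirty_log *log;

	log = dm_dirty_log_create("userspace", ti, flush_cb, argc, argv);
	if (!log)
		return NULL;	/* type not loaded or its ctr failed */
	return log;
}

The matching dm_dirty_log_destroy(log) releases the instance, which in turn
reaches userspace_dtr() through the ops table above.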
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static int __init userspace_dirty_log_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 	int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 	_flush_entry_cache = KMEM_CACHE(dm_dirty_log_flush_entry, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 	if (!_flush_entry_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 		DMWARN("Unable to create flush_entry_cache: No memory.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 	r = dm_ulog_tfr_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 		DMWARN("Unable to initialize userspace log communications");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 		kmem_cache_destroy(_flush_entry_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 	r = dm_dirty_log_type_register(&_userspace_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 		DMWARN("Couldn't register userspace dirty log type");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 		dm_ulog_tfr_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 		kmem_cache_destroy(_flush_entry_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 		return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 	DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void __exit userspace_dirty_log_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 	dm_dirty_log_type_unregister(&_userspace_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 	dm_ulog_tfr_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 	kmem_cache_destroy(_flush_entry_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 	DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) module_init(userspace_dirty_log_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) module_exit(userspace_dirty_log_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) MODULE_LICENSE("GPL");
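
In practice the kernel side loads as the dm-log-userspace module, and the
userspace half of the link is provided by a daemon such as cmirrord from the
lvm2 tree.  If no daemon is listening, requests fail and the code above falls
back to its pessimistic defaults: a sync count of zero and regions reported as
still recovering.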