Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
#endif

/* shared module parameters, defined in drbd_main.c */
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
extern int drbd_fault_rate;
#endif

extern unsigned int drbd_minor_count;
extern char drbd_usermode_helper[];
extern int drbd_proc_details;


/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I choose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)
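
/*
 * Illustrative note (added for clarity, not part of the original header):
 * drbd_printk() picks the output helper at compile time from the type of
 * its object argument, e.g.
 *
 *	drbd_err(device, "local disk read failed\n");
 *
 * ends up in dev_printk() via the device's gendisk, while
 *
 *	drbd_err(connection, "connection lost\n");
 *
 * is prefixed with "drbd <resource name>: ".  Any other object type
 * references the undefined drbd_printk_with_wrong_object_type() and thus
 * fails at link time.
 */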

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 */
#define expect(exp) ({								\
		bool _bool = (exp);						\
		if (!_bool)							\
			drbd_err(device, "ASSERTION %s FAILED in %s\n",		\
			        #exp, __func__);				\
		_bool;								\
		})
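
/*
 * Illustrative example (added, not part of the original header): unlike
 * D_ASSERT(), expect() also evaluates to the tested condition, so callers
 * can both log and bail out:
 *
 *	if (!expect(data_size > 0))
 *		return -EINVAL;
 *
 * Note that the macro refers to a local variable named "device" for the
 * error message, so it can only be used where such a variable is in scope.
 */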

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return drbd_fault_rate &&
		(drbd_enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}
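
/*
 * Illustrative sketch (added, not from the original header): a typical
 * fault injection check before submitting I/O could look like
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_MD_WR)) {
 *		bio_io_error(bio);
 *		return;
 *	}
 *
 * With CONFIG_DRBD_FAULT_INJECTION disabled the helper is a constant 0 and
 * the whole branch is optimized away.
 */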

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
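/* Example (added for illustration): div_ceil(7, 4) == 2, div_floor(7, 4) == 1;
 * the round-up variant assumes non-negative operands. */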

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
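
/*
 * Worked example (added for illustration): with BITS_PER_LONG == 64 a
 * bit_offset of 130 yields word_offset 2 (130 >> 6); with BITS_PER_LONG ==
 * 32 it yields 130 >> 5 == 4, and the "& ~1UL" keeps the offset rounded
 * down to a 64 bit aligned pair of 32 bit words.
 */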

extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);

struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a p_barrier packet to be sent, starting a new epoch.
	 *
	 * This corresponds to "barrier" in struct p_barrier[_ack],
	 * and to "barrier_nr" in struct drbd_epoch (and various
	 * comments/function parameters/local variable names).
	 */
	unsigned int epoch;

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */

	/* see struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics */

	/* Minimal set of time stamps to determine if we wait for activity log
	 * transactions, local disk or peer.  32 bit "jiffies" are good enough,
	 * we don't expect a DRBD request to be stalled for several months.
	 */

	/* before actual request processing */
	unsigned long in_actlog_jif;

	/* local disk */
	unsigned long pre_submit_jif;

	/* per connection */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* Possibly even more detail to track each phase:
	 *  master_completion_jif
	 *      how long did it take to complete the master bio
	 *      (application visible latency)
	 *  allocated_jif
	 *      how long the master bio was blocked until we finally allocated
	 *      a tracking struct
	 *  in_actlog_jif
	 *      how long did we wait for activity log transactions
	 *
	 *  net_queued_jif
	 *      when did we finally queue it for sending
	 *  pre_send_jif
	 *      when did we start sending it
	 *  post_send_jif
	 *      how long did we block in the network stack trying to send it
	 *  acked_jif
	 *      when did we receive (or fake, in protocol A) a remote ACK
	 *  net_done_jif
	 *      when did we receive final acknowledgement (P_BARRIER_ACK),
	 *      or decide, e.g. on connection loss, that we no longer expect
	 *      anything from this peer for this request.
	 *
	 *  pre_submit_jif
	 *  post_sub_jif
	 *      when did we start submitting to the lower level device,
	 *      and how long did we block in that submit function
	 *  local_completion_jif
	 *      how long did it take the lower level device to complete this request
	 */


	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state; /* see comments above _req_mod() */
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* Prototype declarations of functions defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_OP_DISCARD? */
	__EE_TRIM,
	/* explicit zero-out requested, or
	 * our lower level cannot handle trim,
	 * and we want to fall back to zeroout instead */
	__EE_ZEROOUT,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs: */
	/* has this been submitted, or does it still wait for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this is/was a write same request */
	__EE_WRITE_SAME,

	/* this originates from application on peer
	 * (not some resync or verify or other DRBD internal request) */
	__EE_APPLICATION,

	/* If it contains only 0 bytes, send back P_RS_DEALLOCATED */
	__EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_TRIM                (1<<__EE_TRIM)
#define EE_ZEROOUT             (1<<__EE_ZEROOUT)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
#define EE_WRITE		(1<<__EE_WRITE)
#define EE_WRITE_SAME		(1<<__EE_WRITE_SAME)
#define EE_APPLICATION		(1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ		(1<<__EE_RS_THIN_REQ)
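
/*
 * Illustrative example (added, not from the original header): per the
 * comment above, only __EE_WAS_ERROR may be set while bios are in flight,
 * so the endio path needs the atomic form
 *
 *	set_bit(__EE_WAS_ERROR, &peer_req->flags);
 *
 * whereas flags decided before submission can be OR-ed in non-atomically,
 * e.g. peer_req->flags |= EE_WRITE | EE_APPLICATION;  ("peer_req" names a
 * struct drbd_peer_request here.)
 */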

/* flag bits per device */
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* User wants us not to use FUA/FLUSH on meta data dev */

	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,       /* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
	RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
				 * from drbd_flush_after_epoch() */

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED  = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
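
/*
 * Illustrative example (added; assumes the drbd_bm_lock() helper that DRBD
 * declares elsewhere): a bulk operation that only reads the bitmap would
 * take the lock as
 *
 *	drbd_bm_lock(device, "resync", BM_LOCKED_TEST_ALLOWED);
 *
 * which still permits test/count accesses while set and clear attempts are
 * treated as unexpected.
 */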

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;  /* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
	s32 meta_dev_idx;

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[];
};
extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);
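
/*
 * Illustrative note (added; the exact behaviour of fifo_alloc() is defined
 * elsewhere in DRBD and is assumed here): "values" is a flexible array
 * member, so a matching open-coded allocation would look like
 *
 *	struct fifo_buffer *fb = kzalloc(struct_size(fb, values, fifo_size), GFP_KERNEL);
 *
 * with "size" then set to fifo_size and "total" tracking the running sum of
 * the stored values.
 */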

/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context.
				 * If set, bdi_write_congested() returns true,
				 * so shrink_page_list() would not recurse into,
				 * and potentially deadlock on, this drbd worker.
				 */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

enum which_state { NOW, OLD = NOW, NEW };

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_shash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
	struct crypto_shash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
	struct crypto_shash *csums_tfm;
	struct crypto_shash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	/* empty member on older kernels without blk_start_plug() */
	struct blk_plug receiver_plug;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	/* cached pointers,
	 * so we can look up the oldest pending requests more quickly.
	 * protected by resource->req_lock */
	struct drbd_request *req_next; /* DRBD 9: todo.req_next */
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr; /* keeps counting up */
	unsigned int r_cb_nr; /* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		 * with req->epoch == current_epoch_nr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		 * If none, no P_BARRIER will be sent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		unsigned current_epoch_writes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	} send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) static inline bool has_net_conf(struct drbd_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	bool has_net_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	has_net_conf = rcu_dereference(connection->net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	return has_net_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) void __update_timing_details(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		struct drbd_thread_timing_details *tdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		unsigned int *cb_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		void *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		const char *fn, const unsigned int line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) #define update_worker_timing_details(c, cb) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) #define update_receiver_timing_details(c, cb) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) struct submit_worker {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	struct workqueue_struct *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	struct work_struct worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	/* protected by ..->resource->req_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	struct list_head writes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) struct drbd_peer_device {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	struct list_head peer_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	struct work_struct send_acks_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	struct dentry *debugfs_peer_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) struct drbd_device {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	struct drbd_resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	struct list_head peer_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	struct list_head pending_bitmap_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	unsigned long flush_jif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	struct dentry *debugfs_minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	struct dentry *debugfs_vol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	struct dentry *debugfs_vol_oldest_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	struct dentry *debugfs_vol_act_log_extents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	struct dentry *debugfs_vol_resync_extents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	struct dentry *debugfs_vol_data_gen_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	struct dentry *debugfs_vol_ed_gen_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	unsigned int vnr;	/* volume number within the connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	unsigned int minor;	/* device minor number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	struct kref kref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	/* things that are stored as / read from meta data on disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	/* configured by drbdsetup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	struct drbd_backing_dev *ldev __protected_by(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	sector_t p_size;     /* partner's disk size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	struct request_queue *rq_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	struct gendisk	    *vdisk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	unsigned long last_reattach_jif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	struct drbd_work resync_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	struct drbd_work unplug_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct timer_list resync_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	struct timer_list md_sync_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	struct timer_list start_resync_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	struct timer_list request_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	/* Used after attach while negotiating new disk state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	union drbd_state new_state_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	union drbd_dev_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	wait_queue_head_t misc_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	wait_queue_head_t state_wait;  /* upon each state change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	unsigned int send_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	unsigned int recv_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	unsigned int read_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	unsigned int writ_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	unsigned int al_writ_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	unsigned int bm_writ_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	atomic_t ap_actlog_cnt;  /* Requests waiting for activity log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	atomic_t unacked_cnt;	 /* Need to send replies for */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	atomic_t local_cnt;	 /* Waiting for local completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	atomic_t suspend_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	/* Interval tree of pending local requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	struct rb_root read_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	struct rb_root write_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	/* for statistics and timeouts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	/* [0] read, [1] write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	struct list_head pending_master_completion[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	struct list_head pending_completion[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	/* use checksums for *this* resync */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	bool use_csums;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	unsigned long rs_total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	/* number of resync blocks that failed in this run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	unsigned long rs_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	/* Syncer's start time [unit jiffies] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	unsigned long rs_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	/* cumulative time in PausedSyncX state [unit jiffies] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	unsigned long rs_paused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	unsigned long rs_same_csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) #define DRBD_SYNC_MARKS 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) #define DRBD_SYNC_MARK_STEP (3*HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	/* marks' time [unit jiffies] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	/* current index into rs_mark_{left,time} */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	int rs_last_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	unsigned long rs_last_bcast; /* [unit jiffies] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	/* where does the admin want us to start? (sector) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	sector_t ov_start_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	sector_t ov_stop_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	/* where are we now? (sector) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	sector_t ov_position;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	/* Start sector of out of sync range (to merge printk reporting). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	sector_t ov_last_oos_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	/* size of out-of-sync range in sectors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	sector_t ov_last_oos_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	unsigned long ov_left; /* in bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	struct drbd_bitmap *bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	/* Used to track operations of resync... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	struct lru_cache *resync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	/* Number of locked elements in resync LRU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	unsigned int resync_locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	/* resync extent number waiting for application requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	unsigned int resync_wenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	int open_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	u64 *p_uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	struct list_head done_ee;   /* need to send P_WRITE_ACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	struct list_head net_ee;    /* zero-copy network send in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	int next_barrier_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct list_head resync_reads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	atomic_t pp_in_use;		/* allocated from page pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	wait_queue_head_t ee_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	struct drbd_md_io md_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	spinlock_t al_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	wait_queue_head_t al_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	struct lru_cache *act_log;	/* activity log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	unsigned int al_tr_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	int al_tr_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	wait_queue_head_t seq_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	atomic_t packet_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	unsigned int peer_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	spinlock_t peer_seq_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	unsigned long comm_bm_set; /* communicated number of set bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	struct bm_io_work bm_io_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	u64 ed_uuid; /* UUID of the exposed data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	struct mutex own_state_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	char congestion_reason;  /* Why we were congested... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	int rs_last_sect_ev; /* counter to compare with */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	int rs_last_events;  /* counter of read or write "events" (unit sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			      * on the lower level device when we last looked. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	int c_sync_rate; /* current resync rate after syncer throttle magic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, connection->conn_update) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	unsigned int peer_max_bio_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	unsigned int local_max_bio_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	/* any requests that would block in drbd_make_request()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 * are deferred to this single-threaded work queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	struct submit_worker submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) struct drbd_bm_aio_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct list_head list; /* on device->pending_bitmap_io */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	unsigned long start_jif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	atomic_t in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	unsigned int done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	unsigned flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) #define BM_AIO_COPY_PAGES	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) #define BM_AIO_WRITE_HINTED	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) #define BM_AIO_WRITE_ALL_PAGES	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) #define BM_AIO_READ		8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	struct kref kref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) struct drbd_config_context {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	/* assigned from drbd_genlmsghdr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	unsigned int minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	/* assigned from request attributes, if present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	unsigned int volume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) #define VOLUME_UNSPECIFIED		(-1U)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/* pointer into the request skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 * limited lifetime! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	char *resource_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	struct nlattr *my_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	struct nlattr *peer_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	/* reply buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct sk_buff *reply_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	/* pointer into reply buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	struct drbd_genlmsghdr *reply_dh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	/* resolved from attributes, if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	struct drbd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	struct drbd_resource *resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	struct drbd_connection *connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static inline struct drbd_device *minor_to_device(unsigned int minor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	return (struct drbd_device *)idr_find(&drbd_devices, minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static inline struct drbd_peer_device *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) conn_peer_device(struct drbd_connection *connection, int volume_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	return idr_find(&connection->peer_devices, volume_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) #define for_each_resource(resource, _resources) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	list_for_each_entry(resource, _resources, resources)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) #define for_each_resource_rcu(resource, _resources) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	list_for_each_entry_rcu(resource, _resources, resources)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) #define for_each_resource_safe(resource, tmp, _resources) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	list_for_each_entry_safe(resource, tmp, _resources, resources)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) #define for_each_connection(connection, resource) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	list_for_each_entry(connection, &resource->connections, connections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) #define for_each_connection_rcu(connection, resource) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	list_for_each_entry_rcu(connection, &resource->connections, connections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) #define for_each_connection_safe(connection, tmp, resource) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) #define for_each_peer_device(peer_device, device) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) #define for_each_peer_device_rcu(peer_device, device) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) #define for_each_peer_device_safe(peer_device, tmp, device) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
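
/* Illustrative only (not built): a typical use of the iterators above.
 * A minimal sketch with a hypothetical helper name, assuming a plain RCU
 * read-side critical section is sufficient for what the caller wants to do
 * with each connection. */
#if 0
static unsigned int example_count_connections(struct drbd_resource *resource)
{
	struct drbd_connection *connection;
	unsigned int n = 0;

	rcu_read_lock();
	for_each_connection_rcu(connection, resource)
		n++;
	rcu_read_unlock();

	return n;
}
#endif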
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static inline unsigned int device_to_minor(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	return device->minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  * function declarations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  *************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* drbd_main.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) enum dds_flags {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	DDSF_FORCED    = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) extern void drbd_init_set_defaults(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) extern int  drbd_thread_start(struct drbd_thread *thi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) #define drbd_thread_current_set_cpu(A) ({})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		       unsigned int set_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) extern void tl_clear(struct drbd_connection *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) extern void drbd_free_sock(struct drbd_connection *connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		     void *buf, size_t size, unsigned msg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			 unsigned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) extern int drbd_send_protocol(struct drbd_connection *connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) extern int drbd_send_uuids(struct drbd_peer_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) extern int drbd_send_current_state(struct drbd_peer_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) extern int drbd_send_sync_param(struct drbd_peer_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			    u32 set_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			 struct drbd_peer_request *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			     struct p_block_req *rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			     struct p_data *dp, int data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			    sector_t sector, int blksize, u64 block_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			   struct drbd_peer_request *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			      sector_t sector, int size, u64 block_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 				   int size, void *digest, int digest_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 				   enum drbd_packet cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) extern int drbd_send_bitmap(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) extern void drbd_device_cleanup(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) extern void drbd_print_uuids(struct drbd_device *device, const char *text);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) extern void drbd_queue_unplug(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) extern void conn_md_sync(struct drbd_connection *connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) extern void drbd_md_write(struct drbd_device *device, void *buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) extern void drbd_md_sync(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) extern void drbd_md_mark_dirty(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) extern void drbd_queue_bitmap_io(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 				 int (*io_fn)(struct drbd_device *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 				 void (*done)(struct drbd_device *, int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 				 char *why, enum bm_flag flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) extern int drbd_bitmap_io(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		int (*io_fn)(struct drbd_device *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		char *why, enum bm_flag flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		int (*io_fn)(struct drbd_device *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		char *why, enum bm_flag flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /* Meta data layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  * We currently have two possible layouts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)  * Offsets in (512 byte) sectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)  * external:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  *   |----------- md_size_sect ------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)  *   [ 4k superblock ][ activity log ][  Bitmap  ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)  *   | al_offset == 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)  *   | bm_offset = al_offset + X      |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)  *  ==> bitmap sectors = md_size_sect - bm_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  *  Variants:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  *     old, indexed fixed size meta data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)  * internal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)  *            |----------- md_size_sect ------------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)  * [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)  *                        | al_offset < 0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)  *            | bm_offset = al_offset - Y |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)  *  ==> bitmap sectors = Y = al_offset - bm_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)  *  [padding*] are zero or up to 7 unused 512 Byte sectors to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)  *  end of the device, so that the [4k superblock] will be 4k aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)  *  The activity log consists of 4k transaction blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)  *  which are written in a ring-buffer, or striped ring-buffer like fashion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)  *  Its on-disk size used to be fixed at 32kB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)  *  but is about to become configurable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)  */
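
/* Worked example for the "external" layout above (illustrative; the numbers
 * assume the classic fixed 32kB activity log, i.e. 64 sectors, cf.
 * MD_32kB_SECT below):
 *   al_offset = 8                          (4k superblock = sectors 0..7)
 *   bm_offset = al_offset + 64 = 72
 *   bitmap sectors = md_size_sect - bm_offset = md_size_sect - 72
 */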
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* Our old fixed size meta data layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  * allows up to about 3.8TB, so if you want more,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * you need to use the "flexible" meta data format. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) #define MD_128MB_SECT (128LLU << 11)  /* 128 MB, unit sectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) #define MD_4kB_SECT	 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) #define MD_32kB_SECT	64
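
/* Worked out in 512-byte sectors: 128 MiB = 128 << 11 = 262144 sectors,
 * 32 kB = 64 sectors, 4 kB = 8 sectors. */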
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /* One activity log extent represents 4M of storage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) #define AL_EXTENT_SHIFT 22
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) #define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /* We could make these currently hardcoded constants configurable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)  * variables at create-md time (or even re-configurable at runtime?).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)  * That would require some more changes to the DRBD "super block"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)  * and attach code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)  * updates per transaction:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)  *   This many changes to the active set can be logged with one transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)  *   This number is arbitrary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)  * context per transaction:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)  *   This many context extent numbers are logged with each transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)  *   This number results from the transaction block size (4k), the layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  *   of the transaction header, and the number of updates per transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  *   See drbd_actlog.c:struct al_transaction_on_disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)  * */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) #define AL_UPDATES_PER_TRANSACTION	 64	// arbitrary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) #define AL_CONTEXT_PER_TRANSACTION	919	// (4096 - 36 - 6*64)/4
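
/* Worked out from the formula above: a 4k transaction block, minus what is
 * presumably a 36 byte header and 64 update slots of 6 bytes each, leaves
 * 4096 - 36 - 384 = 3676 bytes, i.e. room for 3676/4 = 919 32-bit context
 * extent numbers (see drbd_actlog.c:struct al_transaction_on_disk). */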
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) #if BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) #define LN2_BPL 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) #define cpu_to_lel(A) cpu_to_le32(A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) #define lel_to_cpu(A) le32_to_cpu(A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) #elif BITS_PER_LONG == 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) #define LN2_BPL 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) #define cpu_to_lel(A) cpu_to_le64(A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) #define lel_to_cpu(A) le64_to_cpu(A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) #error "LN2 of BITS_PER_LONG unknown!"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* resync bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /* 16MB sized 'bitmap extent' to track syncer usage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct bm_extent {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	int rs_left; /* number of bits set (out of sync) in this extent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	int rs_failed; /* number of failed resync requests in this extent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	struct lc_element lce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) #define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) #define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) #define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */
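
/* Illustrative only (not built): the BME_* values are bit numbers, intended
 * for the generic bitops on bm_extent.flags.  A minimal sketch with a
 * hypothetical helper name: */
#if 0
static inline bool bm_extent_is_locked(struct bm_extent *bm_ext)
{
	return test_bit(BME_LOCKED, &bm_ext->flags);
}
#endif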
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* drbd_bitmap.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)  * We need to store one bit for a block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)  * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)  * Bit 0 ==> local node thinks this block is binary identical on both nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)  * Bit 1 ==> local node thinks this block needs to be synced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)  */
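
/* Worked out for the example above: 1 GiB / 4096 bytes per bit = 2^18 bits
 * = 2^15 bytes = 32 KiB of bitmap. */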
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) #define SLEEP_TIME (HZ/10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* We do bitmap IO in units of 4k blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)  * We also still have a hardcoded 4k per bit relation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) #define BM_BLOCK_SHIFT	12			 /* 4k per bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) #define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /* mostly arbitrarily set the represented size of one bitmap extent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  * at 4k per bit resolution) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) #define BM_EXT_SHIFT	 24	/* 16 MiB per resync extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) #define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) #if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) #error "HAVE YOU FIXED drbdmeta AS WELL??"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /* this many _storage_ sectors are described by one bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) #define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) #define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) #define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)
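
/* Worked out: BM_BLOCK_SHIFT - 9 = 3, so one bit stands for 2^3 = 8
 * 512-byte sectors (4 KiB); BM_SECT_TO_BIT(x) is simply x >> 3 and
 * BM_SECT_PER_BIT is 8. */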
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /* bit to represented kilo byte conversion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) #define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /* in which _bitmap_ extent (resp. sector) the bit for a certain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)  * _storage_ sector is located in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) #define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) #define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /* first storage sector a bitmap extent corresponds to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) #define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /* how many _storage_ sectors we have per bitmap extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) #define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /* how many bits are covered by one bitmap extent (resync extent) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) #define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) #define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* in one sector of the bitmap, we have this many activity_log extents. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) #define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
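
/* Worked out with the current constants: BM_EXT_SHIFT - 9 = 15, so one
 * bitmap (resync) extent spans 2^15 = 32768 sectors = 16 MiB and holds
 * BM_BITS_PER_EXT = 1 << (24 - 12) = 4096 bits; one 512-byte sector of
 * on-disk bitmap covers exactly such an extent, which in turn contains
 * AL_EXT_PER_BM_SECT = 1 << (24 - 22) = 4 activity log extents. */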
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /* the extent in "PER_EXTENT" below is an activity log extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  * we need that many (long words/bytes) to store the bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  *		     of one AL_EXTENT_SIZE chunk of storage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  * we can store the bitmap for that many AL_EXTENTS within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  * one sector of the _on_disk_ bitmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)  * bit	 0	  bit 37   bit 38	     bit (512*8)-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  *	     ...|........|........|.. // ..|........|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)  * sect. 0	 `296	  `304			   ^(512*8*8)-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) #define BM_WORDS_PER_EXT    ( (AL_EXTENT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) #define BM_BYTES_PER_EXT    ( (AL_EXTENT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) #define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXT )	 //   4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) #define DRBD_MAX_SECTORS_32 (0xffffffffLU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /* we have a certain meta data variant that has a fixed on-disk size of 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)  * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)  * log, leaving this many sectors for the bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) #define DRBD_MAX_SECTORS_FIXED_BM \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	  ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
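
/* Worked out: 262144 - 64 - 8 = 262072 bitmap sectors remain, and each
 * 512-byte bitmap sector (4096 bits at 4 KiB per bit) describes
 * 1 << (BM_EXT_SHIFT - 9) = 32768 data sectors (16 MiB). */
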
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /* 16 TB in units of sectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) #if BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /* adjust by one page worth of bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  * so we won't wrap around in drbd_bm_find_next_bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)  * You should use a 64bit OS for that much storage anyway. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) #define DRBD_MAX_SECTORS_FLEX (1UL << 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) /* corresponds to (1UL << 38) bits right now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* Estimate max bio size as 256 * PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  * Since we may live in a mixed-platform cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * we limit ourselves to a platform-agnostic constant here for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  * A followup commit may allow even bigger BIO sizes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  * once we thought that through. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) #define DRBD_MAX_BIO_SIZE (1U << 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) #if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_PAGES * PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)       /* Works always = 4k */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) #define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) #define DRBD_MAX_BIO_SIZE_P95    (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* For now, don't allow more than half of what we can "activate" in one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * activity log transaction to be discarded in one go. We may need to rework
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * drbd_al_begin_io() to allow for even larger discard ranges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) #define DRBD_MAX_BATCH_BIO_SIZE	 (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) #define DRBD_MAX_BBIO_SECTORS    (DRBD_MAX_BATCH_BIO_SIZE >> 9)
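
/* Worked out: 64/2 = 32 updates * 4 MiB per AL extent = 128 MiB per batch,
 * i.e. DRBD_MAX_BBIO_SECTORS = 128 MiB / 512 = 262144 sectors. */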
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) extern int  drbd_bm_init(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) extern int  drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) extern void drbd_bm_cleanup(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) extern void drbd_bm_set_all(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) extern void drbd_bm_clear_all(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /* set/clear/test only a few bits at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) extern int  drbd_bm_set_bits(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		struct drbd_device *device, unsigned long s, unsigned long e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) extern int  drbd_bm_clear_bits(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		struct drbd_device *device, unsigned long s, unsigned long e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) extern int drbd_bm_count_bits(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	struct drbd_device *device, const unsigned long s, const unsigned long e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /* bm_set_bits variant for use while holding drbd_bm_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)  * may process the whole bitmap in one go */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) extern void _drbd_bm_set_bits(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		const unsigned long s, const unsigned long e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) extern int  drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) extern int  drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) extern int  drbd_bm_read(struct drbd_device *device) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) extern int  drbd_bm_write(struct drbd_device *device) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) extern int  drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) extern int  drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) extern int  drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) extern size_t	     drbd_bm_words(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) extern unsigned long drbd_bm_bits(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) extern sector_t      drbd_bm_capacity(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) #define DRBD_END_OF_BITMAP	(~(unsigned long)0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /* bm_find_next variants for use while you hold drbd_bm_lock() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /* for receive_bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		size_t number, unsigned long *buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* for _drbd_send_bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		size_t number, unsigned long *buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) extern void drbd_bm_unlock(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /* drbd_main.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) extern struct kmem_cache *drbd_request_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) extern struct kmem_cache *drbd_ee_cache;	/* peer requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) extern mempool_t drbd_request_mempool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) extern mempool_t drbd_ee_mempool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* drbd's page pool, used to buffer data received from the peer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)  * or data requested by the peer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)  * This does not have an emergency reserve.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  * When allocating from this pool, it first takes pages from the pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)  * Only if the pool is depleted will it try to allocate from the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)  * The assumption is that pages taken from this pool will be processed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)  * and given back, "quickly", and then can be recycled, so we can avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)  * frequent calls to alloc_page(), and still will be able to make progress even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)  * under memory pressure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) extern struct page *drbd_pp_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) extern spinlock_t   drbd_pp_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) extern int	    drbd_pp_vacant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) extern wait_queue_head_t drbd_pp_wait;
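
/*
 * Illustrative sketch only (example_* is not part of the driver; the real
 * allocator behind this pool is drbd_alloc_pages(), declared further down):
 * take one page from the pre-allocated chain under drbd_pp_lock, and fall
 * back to alloc_page() only when the pool is empty.  The free list is
 * assumed to be linked through page_private(), see page_chain_next() below.
 */
static inline struct page *example_pp_alloc_one(gfp_t gfp_mask)
{
	struct page *page = NULL;

	spin_lock(&drbd_pp_lock);
	if (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		drbd_pp_vacant--;
	}
	spin_unlock(&drbd_pp_lock);

	if (!page)
		page = alloc_page(gfp_mask);
	return page;
}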
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /* We also need a standard (emergency-reserve backed) page pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)  * for meta data IO (activity log, bitmap).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)  * We can keep it global, as long as it is used as "N pages at a time".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)  * 128 should be plenty; currently we could probably get away with as few as 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) #define DRBD_MIN_POOL_PAGES	128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) extern mempool_t drbd_md_io_page_pool;
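
/*
 * Minimal usage sketch, not part of the driver (example_* names are
 * assumptions): meta data IO paths are expected to draw pages from this
 * mempool with GFP_NOIO, so the allocation cannot recurse into the block
 * layer, and to return them with mempool_free() once the IO has completed.
 */
static inline struct page *example_md_io_page_get(void)
{
	return mempool_alloc(&drbd_md_io_page_pool, GFP_NOIO);
}

static inline void example_md_io_page_put(struct page *page)
{
	mempool_free(page, &drbd_md_io_page_pool);
}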
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /* We also need to make sure we get a bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  * when we need it for housekeeping purposes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) extern struct bio_set drbd_md_io_bio_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /* to allocate from that set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
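
/*
 * Illustrative sketch, not part of the driver: the rough pattern for a small
 * housekeeping IO using a bio from drbd_md_io_bio_set.  The bdev, sector,
 * page and op arguments are placeholders; a real caller would also set
 * bi_end_io / bi_private and handle the completion and error paths.
 */
static inline void example_submit_md_bio(struct block_device *bdev,
					 sector_t sector, struct page *page,
					 unsigned int op)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);

	if (!bio)
		return;
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_opf = op;
	submit_bio(bio);
}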
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /* And a bio_set for cloning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) extern struct bio_set drbd_io_bio_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) extern struct mutex resources_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) extern int conn_lowest_minor(struct drbd_connection *connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) extern void drbd_destroy_device(struct kref *kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) extern void drbd_delete_device(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) extern struct drbd_resource *drbd_create_resource(const char *name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) extern void drbd_free_resource(struct drbd_resource *resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) extern void drbd_destroy_connection(struct kref *kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 					    void *peer_addr, int peer_addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) extern struct drbd_resource *drbd_find_resource(const char *name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) extern void drbd_destroy_resource(struct kref *kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) extern void conn_free_crypto(struct drbd_connection *connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /* drbd_req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) extern void do_submit(struct work_struct *ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) extern blk_qc_t drbd_submit_bio(struct bio *bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) extern int is_valid_ar_handle(struct drbd_request *, sector_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) /* drbd_nl.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) extern struct mutex notification_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) extern void drbd_suspend_io(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) extern void drbd_resume_io(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) extern char *ppsize(char *buf, unsigned long long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) enum determine_dev_size {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	DS_ERROR_SHRINK = -3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	DS_ERROR_SPACE_MD = -2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	DS_ERROR = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	DS_UNCHANGED = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	DS_SHRUNK = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	DS_GREW = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	DS_GREW_FROM_ZERO = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) extern enum determine_dev_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) extern void resync_after_online_grow(struct drbd_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 			struct drbd_backing_dev *bdev, struct o_qlim *o);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 					enum drbd_role new_role,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 					int force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) extern bool conn_try_outdate_peer(struct drbd_connection *connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) extern int drbd_khelper(struct drbd_device *device, char *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /* drbd_worker.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /* bi_end_io handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) extern void drbd_md_endio(struct bio *bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) extern void drbd_peer_request_endio(struct bio *bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) extern void drbd_request_endio(struct bio *bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) extern int drbd_worker(struct drbd_thread *thi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) void drbd_resync_after_changed(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) extern void resume_next_sg(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) extern void suspend_other_sg(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) extern int drbd_resync_finished(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /* maybe rather drbd_main.c ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) extern void drbd_md_put_buffer(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) extern int drbd_md_sync_page_io(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		struct drbd_backing_dev *bdev, sector_t sector, int op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) extern void wait_until_done_or_force_detached(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		struct drbd_backing_dev *bdev, unsigned int *done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) extern void drbd_rs_controller_reset(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) static inline void ov_out_of_sync_print(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (device->ov_last_oos_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		     (unsigned long long)device->ov_last_oos_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		     (unsigned long)device->ov_last_oos_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	device->ov_last_oos_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			 void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) /* worker callbacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) extern int w_e_end_data_req(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) extern int w_e_end_rsdata_req(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) extern int w_e_end_csum_rs_req(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) extern int w_e_end_ov_reply(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) extern int w_e_end_ov_req(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) extern int w_ov_finished(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) extern int w_resync_timer(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) extern int w_send_write_hint(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) extern int w_send_dblock(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) extern int w_send_read_req(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) extern int w_e_reissue(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) extern int w_restart_disk_io(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) extern int w_send_out_of_sync(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) extern int w_start_resync(struct drbd_work *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) extern void resync_timer_fn(struct timer_list *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) extern void start_resync_timer_fn(struct timer_list *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) /* drbd_receiver.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		sector_t start, unsigned int nr_sectors, int flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) extern int drbd_receiver(struct drbd_thread *thi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) extern int drbd_ack_receiver(struct drbd_thread *thi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) extern void drbd_send_ping_wf(struct work_struct *ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) extern void drbd_send_acks_wf(struct work_struct *ws);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		bool throttle_if_app_is_waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) extern int drbd_submit_peer_request(struct drbd_device *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 				    struct drbd_peer_request *, const unsigned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 				    const unsigned, const int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 						     sector_t, unsigned int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 						     unsigned int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 						     gfp_t) __must_hold(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 				 int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) extern int drbd_connected(struct drbd_peer_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /* sets the number of 512 byte sectors of our virtual device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  * used to submit our private bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) static inline void drbd_submit_bio_noacct(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 					     int fault_type, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	__release(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	if (!bio->bi_disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		bio->bi_status = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		bio_endio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	if (drbd_insert_fault(device, fault_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		bio_io_error(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		submit_bio_noacct(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 			      enum write_ordering_e wo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /* drbd_proc.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) extern struct proc_dir_entry *drbd_proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) int drbd_seq_show(struct seq_file *seq, void *v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) /* drbd_actlog.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) extern void drbd_al_begin_io_commit(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) extern void drbd_rs_cancel_all(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) extern int drbd_rs_del_all(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) extern void drbd_rs_failed_io(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		sector_t sector, int size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		enum update_sync_bits_mode mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) #define drbd_set_in_sync(device, sector, size) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	__drbd_change_sync(device, sector, size, SET_IN_SYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) #define drbd_set_out_of_sync(device, sector, size) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	__drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) #define drbd_rs_failed_io(device, sector, size) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	__drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) extern void drbd_al_shrink(struct drbd_device *device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) extern int drbd_al_initialize(struct drbd_device *, void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) /* drbd_nl.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /* state info broadcast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) struct sib_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	enum drbd_state_info_bcast_reason sib_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			char *helper_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			unsigned helper_exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 			union drbd_state os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			union drbd_state ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
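
/*
 * Minimal usage sketch (example_* is an assumed caller, not part of the
 * driver): how a state transition is assumed to be broadcast -- fill in the
 * reason plus the old/new state pair and hand it to drbd_bcast_event().
 */
static inline void example_bcast_state_change(struct drbd_device *device,
					      union drbd_state os,
					      union drbd_state ns)
{
	struct sib_info sib = {
		.sib_reason = SIB_STATE_CHANGE,
		.os = os,
		.ns = ns,
	};

	drbd_bcast_event(device, &sib);
}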
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) extern void notify_resource_state(struct sk_buff *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 				  unsigned int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 				  struct drbd_resource *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 				  struct resource_info *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 				  enum drbd_notification_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) extern void notify_device_state(struct sk_buff *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 				unsigned int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 				struct drbd_device *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 				struct device_info *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 				enum drbd_notification_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) extern void notify_connection_state(struct sk_buff *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 				    unsigned int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 				    struct drbd_connection *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 				    struct connection_info *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 				    enum drbd_notification_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) extern void notify_peer_device_state(struct sk_buff *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 				     unsigned int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 				     struct drbd_peer_device *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 				     struct peer_device_info *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 				     enum drbd_notification_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			  struct drbd_connection *, const char *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)  * inline helper functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)  *************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) /* see also page_chain_add and friends in drbd_receiver.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static inline struct page *page_chain_next(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	return (struct page *)page_private(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) #define page_chain_for_each(page) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			page = page_chain_next(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) #define page_chain_for_each_safe(page, n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
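
/*
 * Illustrative sketch only (example_* is not part of the driver): releasing a
 * whole chain needs the _safe variant, because each page is unlinked from the
 * chain before it is dropped.
 */
static inline void example_free_page_chain(struct page *page)
{
	struct page *next;

	page_chain_for_each_safe(page, next) {
		set_page_private(page, 0);
		put_page(page);
	}
}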
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	struct page *page = peer_req->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	page_chain_for_each(page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		if (page_count(page) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) static inline union drbd_state drbd_read_state(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	struct drbd_resource *resource = device->resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	union drbd_state rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	rv.i = device->state.i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	rv.susp = resource->susp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	rv.susp_nod = resource->susp_nod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	rv.susp_fen = resource->susp_fen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) enum drbd_force_detach_flags {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	DRBD_READ_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	DRBD_WRITE_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	DRBD_META_IO_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	DRBD_FORCE_DETACH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) static inline void __drbd_chk_io_error_(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		enum drbd_force_detach_flags df,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		const char *where)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	enum drbd_io_error_p ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	switch (ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 			if (__ratelimit(&drbd_ratelimit_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 				drbd_err(device, "Local IO failed in %s.\n", where);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			if (device->state.disk > D_INCONSISTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		fallthrough;	/* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	case EP_DETACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	case EP_CALL_HELPER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		/* Remember whether we saw a READ or WRITE error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		 * Recovery of the affected area for WRITE failure is covered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		 * by the activity log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		 * READ errors may fall outside that area though. Certain READ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		 * errors can be "healed" by writing good data to the affected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		 * blocks, which triggers block re-allocation in lower layers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		 * If we can not write the bitmap after a READ error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		 * we may need to trigger a full sync (see w_go_diskless()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		 * Force-detach is not really an IO error, but rather a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		 * desperate measure to try to deal with a completely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		 * unresponsive lower level IO stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		 * Still it should be treated as a WRITE error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		 * Meta IO error is always WRITE error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		 * we read meta data only once during attach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		 * which will fail in case of errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		set_bit(WAS_IO_ERROR, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		if (df == DRBD_READ_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 			set_bit(WAS_READ_ERROR, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		if (df == DRBD_FORCE_DETACH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 			set_bit(FORCE_DETACH, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		if (device->state.disk > D_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 			drbd_err(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 				"Local IO failed in %s. Detaching...\n", where);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)  * drbd_chk_io_error() - Handle the on_io_error setting; should be called from all IO completion handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)  * @device:	 DRBD device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)  * @error:	 Error code passed to the IO completion callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)  * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)  * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) static inline void drbd_chk_io_error_(struct drbd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	int error, enum drbd_force_detach_flags forcedetach, const char *where)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		spin_lock_irqsave(&device->resource->req_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		__drbd_chk_io_error_(device, forcedetach, where);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		spin_unlock_irqrestore(&device->resource->req_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)  * drbd_md_first_sector() - Returns the first sector number of the meta data area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)  * @bdev:	Meta data block device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)  * BTW, for internal meta data, this happens to be the maximum capacity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)  * we could agree upon with our peer node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	switch (bdev->md.meta_dev_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	case DRBD_MD_INDEX_INTERNAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	case DRBD_MD_INDEX_FLEX_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		return bdev->md.md_offset + bdev->md.bm_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	case DRBD_MD_INDEX_FLEX_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		return bdev->md.md_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)  * drbd_md_last_sector() - Return the last sector number of the meta data area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  * @bdev:	Meta data block device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	switch (bdev->md.meta_dev_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	case DRBD_MD_INDEX_INTERNAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	case DRBD_MD_INDEX_FLEX_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		return bdev->md.md_offset + MD_4kB_SECT -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	case DRBD_MD_INDEX_FLEX_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		return bdev->md.md_offset + bdev->md.md_size_sect -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) /* Returns the number of 512 byte sectors of the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) static inline sector_t drbd_get_capacity(struct block_device *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)  * drbd_get_max_capacity() - Returns the capacity we announce to our peer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)  * @bdev:	Meta data block device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)  * Returns the capacity we announce to our peer.  We clip ourselves at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)  * various MAX_SECTORS, because if we don't, the current implementation will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)  * oops sooner or later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	sector_t s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	switch (bdev->md.meta_dev_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	case DRBD_MD_INDEX_INTERNAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	case DRBD_MD_INDEX_FLEX_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		s = drbd_get_capacity(bdev->backing_bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 				drbd_md_first_sector(bdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			: 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	case DRBD_MD_INDEX_FLEX_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 				drbd_get_capacity(bdev->backing_bdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		/* clip at maximum size the meta device can support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		s = min_t(sector_t, s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 			BM_EXT_TO_SECT(bdev->md.md_size_sect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 				     - bdev->md.bm_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		s = min_t(sector_t, DRBD_MAX_SECTORS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 				drbd_get_capacity(bdev->backing_bdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)  * drbd_md_ss() - Return the sector number of our meta data super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)  * @bdev:	Meta data block device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	const int meta_dev_idx = bdev->md.meta_dev_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	/* Since drbd08, internal meta data is always "flexible".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	 * position: last 4k aligned block of 4k size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	/* external, some index; this is the old fixed size layout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	return MD_128MB_SECT * bdev->md.meta_dev_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
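
/*
 * Worked example (illustrative numbers only): with internal meta data on a
 * backing device of 2,000,005 sectors, the super block sits at the start of
 * the last 4k-aligned 4k block: (2000005 & ~7) - 8 = 2000000 - 8 = 1999992.
 */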
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	spin_lock_irqsave(&q->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	list_add_tail(&w->list, &q->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	spin_unlock_irqrestore(&q->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	wake_up(&q->q_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	spin_lock_irqsave(&q->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	if (list_empty_careful(&w->list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		list_add_tail(&w->list, &q->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	spin_unlock_irqrestore(&q->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	wake_up(&q->q_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) drbd_device_post_work(struct drbd_device *device, int work_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	if (!test_and_set_bit(work_bit, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		struct drbd_connection *connection =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			first_peer_device(device)->connection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		struct drbd_work_queue *q = &connection->sender_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			wake_up(&q->q_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) /* To get the ack_receiver out of the blocking network stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)  * so it can change its sk_rcvtimeo from idle- to ping-timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)  * and send a ping, we need to send a signal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)  * Which signal we send is irrelevant. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) static inline void wake_ack_receiver(struct drbd_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	struct task_struct *task = connection->ack_receiver.task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		send_sig(SIGXCPU, task, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) static inline void request_ping(struct drbd_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	set_bit(SEND_PING, &connection->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	wake_ack_receiver(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 			     enum drbd_packet, unsigned int, void *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 			     unsigned int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 			     enum drbd_packet, unsigned int, void *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 			     unsigned int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) extern int drbd_send_ping(struct drbd_connection *connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) extern int drbd_send_ping_ack(struct drbd_connection *connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) static inline void drbd_thread_stop(struct drbd_thread *thi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	_drbd_thread_stop(thi, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	_drbd_thread_stop(thi, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	_drbd_thread_stop(thi, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* counts how many answer packets we expect from our peer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)  * for either explicit application requests,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)  * or implicit barrier packets as necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)  * increased:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)  *  w_send_barrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)  *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)  *    it is much easier and equally valid to count what we queue for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)  *    worker, even before it actually was queued or sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)  *    (drbd_make_request_common; recovery path on read io-error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)  * decreased:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)  *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  *  _req_mod(req, DATA_RECEIVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)  *     [from receive_DataReply]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)  *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)  *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)  *     for some reason it is NOT decreased in got_NegAck,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)  *     but in the resulting cleanup code from report_params.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)  *     we should try to remember the reason for that...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)  *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)  *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)  *     [from tl_clear_barrier]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) static inline void inc_ap_pending(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	atomic_inc(&device->ap_pending_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) #define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	if (atomic_read(&device->which) < 0)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 			func, line,					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 			atomic_read(&device->which))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) #define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	if (atomic_dec_and_test(&device->ap_pending_cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		wake_up(&device->misc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) /* counts how many resync-related answers we still expect from the peer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)  *		     increase			decrease
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)  * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)  * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)  *					   (or P_NEG_ACK with ID_SYNCER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) static inline void inc_rs_pending(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	atomic_inc(&device->rs_pending_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) #define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	atomic_dec(&device->rs_pending_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) /* counts how many answers we still need to send to the peer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)  * increased on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)  *  receive_Data	unless protocol A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)  *			we need to send a P_RECV_ACK (proto B)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)  *			or P_WRITE_ACK (proto C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)  *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  *  receive_Barrier_*	we need to send a P_BARRIER_ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) static inline void inc_unacked(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	atomic_inc(&device->unacked_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) #define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	atomic_dec(&device->unacked_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) #define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	atomic_sub(n, &device->unacked_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
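/*
 * Illustration only (assumed usage, not from this file): a protocol C
 * write from the peer pairs inc_unacked() on receive with dec_unacked()
 * once the P_WRITE_ACK has been queued.
 */
#if 0	/* example sketch, never compiled */
static void example_receive_data_proto_c(struct drbd_device *device)
{
	inc_unacked(device);	/* we owe the peer a P_WRITE_ACK */
	/* ... submit the write to the local backing device ... */
	/* later, after the ACK packet has been queued: */
	dec_unacked(device);
}
#endif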
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) static inline bool is_sync_target_state(enum drbd_conns connection_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	return	connection_state == C_SYNC_TARGET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		connection_state == C_PAUSED_SYNC_T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) static inline bool is_sync_source_state(enum drbd_conns connection_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	return	connection_state == C_SYNC_SOURCE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		connection_state == C_PAUSED_SYNC_S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) static inline bool is_sync_state(enum drbd_conns connection_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	return	is_sync_source_state(connection_state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		is_sync_target_state(connection_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)  * get_ldev_if_state() - Increase the ref count on device->ldev. Returns false if the disk state is below @_min_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)  * @_device:		DRBD device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)  * @_min_state:		Minimum device state required for success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)  * You have to call put_ldev() when finished working with device->ldev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) #define get_ldev_if_state(_device, _min_state)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	(_get_ldev_if_state((_device), (_min_state)) ?			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	 ({ __acquire(x); true; }) : false)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) #define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
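/*
 * Typical usage pattern (illustration, assuming a caller that needs
 * device->ldev): every successful get_ldev()/get_ldev_if_state() must
 * be balanced by put_ldev(), on error paths as well.
 */
#if 0	/* example sketch, never compiled */
	if (get_ldev(device)) {
		/* device->ldev cannot go away until put_ldev() */
		/* ... read-only access to the backing device ... */
		put_ldev(device);
	}
#endif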
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static inline void put_ldev(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	enum drbd_disk_state disk_state = device->state.disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	/* We must check the state *before* the atomic_dec becomes visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	 * or we have a theoretical race where someone hitting zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	 * while the state is still D_FAILED, would then see D_DISKLESS in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	 * condition below and call into destroy, which must not happen yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	int i = atomic_dec_return(&device->local_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	/* This may be called from some endio handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	 * so we must not sleep here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	__release(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	D_ASSERT(device, i >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		if (disk_state == D_DISKLESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 			/* even internal references gone, safe to destroy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 			drbd_device_post_work(device, DESTROY_DISK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		if (disk_state == D_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 			/* all application IO references gone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 				drbd_device_post_work(device, GO_DISKLESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		wake_up(&device->misc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) #ifndef __CHECKER__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	int io_allowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	/* never get a reference while D_DISKLESS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	if (device->state.disk == D_DISKLESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	atomic_inc(&device->local_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	io_allowed = (device->state.disk >= mins);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	if (!io_allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		put_ldev(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	return io_allowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /* this throttles on-the-fly application requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)  * according to max_buffers settings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)  * maybe re-implement using semaphores? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) static inline int drbd_get_max_buffers(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	struct net_conf *nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	int mxb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	return mxb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) static inline int drbd_state_is_stable(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	union drbd_dev_state s = device->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	/* DO NOT add a default clause, we want the compiler to warn us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	 * for any newly introduced state we may have forgotten to add here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	switch ((enum drbd_conns)s.conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	/* new io only accepted when there is no connection, ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	case C_STANDALONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	case C_WF_CONNECTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	/* ... or there is a well established connection. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	case C_CONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	case C_SYNC_SOURCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	case C_SYNC_TARGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	case C_VERIFY_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	case C_VERIFY_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	case C_PAUSED_SYNC_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	case C_PAUSED_SYNC_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	case C_AHEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	case C_BEHIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		/* transitional states, IO allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	case C_DISCONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	case C_UNCONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	case C_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	case C_BROKEN_PIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	case C_NETWORK_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	case C_PROTOCOL_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	case C_TEAR_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	case C_WF_REPORT_PARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	case C_STARTING_SYNC_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	case C_STARTING_SYNC_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		/* Allow IO in BM exchange states with new protocols */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	case C_WF_BITMAP_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		if (first_peer_device(device)->connection->agreed_pro_version < 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		/* no new io accepted in these states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	case C_WF_BITMAP_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	case C_WF_SYNC_UUID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	case C_MASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		/* not "stable" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	switch ((enum drbd_disk_state)s.disk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	case D_DISKLESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	case D_INCONSISTENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	case D_OUTDATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	case D_CONSISTENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	case D_UP_TO_DATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	case D_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		/* disk state is stable as well. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	/* no new io accepted during transitional states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	case D_ATTACHING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	case D_NEGOTIATING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	case D_UNKNOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	case D_MASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		/* not "stable" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) static inline int drbd_suspended(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	struct drbd_resource *resource = device->resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	return resource->susp || resource->susp_fen || resource->susp_nod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) static inline bool may_inc_ap_bio(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	int mxb = drbd_get_max_buffers(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	if (drbd_suspended(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	if (atomic_read(&device->suspend_cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	/* to avoid potential deadlock or bitmap corruption,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	 * in various places, we only allow new application io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	 * to start during "stable" states. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	/* no new io accepted when attaching or detaching the disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	if (!drbd_state_is_stable(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	/* since some older kernels don't have atomic_add_unless,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	 * and we are within the spinlock anyway, we have this workaround.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	if (atomic_read(&device->ap_bio_cnt) > mxb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	if (test_bit(BITMAP_IO, &device->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) static inline bool inc_ap_bio_cond(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	bool rv = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	spin_lock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	rv = may_inc_ap_bio(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		atomic_inc(&device->ap_bio_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	spin_unlock_irq(&device->resource->req_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) static inline void inc_ap_bio(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	/* we wait here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	 *    as long as the device is suspended,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	 *    while a bitmap exchange is still in flight during the connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	 *    handshake, and as long as we would exceed the max_buffer limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	 * to avoid races with the reconnect code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	 * we need to atomic_inc within the spinlock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	wait_event(device->misc_wait, inc_ap_bio_cond(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) static inline void dec_ap_bio(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	int mxb = drbd_get_max_buffers(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	D_ASSERT(device, ap_bio >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 			drbd_queue_work(&first_peer_device(device)->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 				connection->sender_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 				&device->bm_io_work.w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	/* this currently does wake_up for every dec_ap_bio!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	 * maybe rather introduce some type of hysteresis?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	if (ap_bio < mxb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		wake_up(&device->misc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
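/*
 * Illustration only (assumed usage, not from this file): the request
 * path brackets application IO with inc_ap_bio()/dec_ap_bio().  Note
 * that inc_ap_bio() may sleep until the state is "stable" again and
 * the max_buffers limit is no longer exceeded.
 */
#if 0	/* example sketch, never compiled */
static void example_submit_application_io(struct drbd_device *device)
{
	inc_ap_bio(device);
	/* ... build and submit the drbd_request ... */
	/* on request completion: */
	dec_ap_bio(device);
}
#endif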
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) static inline bool verify_can_do_stop_sector(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		first_peer_device(device)->connection->agreed_pro_version != 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	int changed = device->ed_uuid != val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	device->ed_uuid = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	return changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) static inline int drbd_queue_order_type(struct drbd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	/* sorry, we currently have no working implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	 * of distributed TCQ stuff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) #ifndef QUEUE_ORDERED_NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) #define QUEUE_ORDERED_NONE 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	return QUEUE_ORDERED_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	return list_first_entry_or_null(&resource->connections,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 				struct drbd_connection, connections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) #endif