Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DRBD_PROTOCOL_H
#define __DRBD_PROTOCOL_H

enum drbd_packet {
	/* receiver (data socket) */
	P_DATA		      = 0x00,
	P_DATA_REPLY	      = 0x01, /* Response to P_DATA_REQUEST */
	P_RS_DATA_REPLY	      = 0x02, /* Response to P_RS_DATA_REQUEST */
	P_BARRIER	      = 0x03,
	P_BITMAP	      = 0x04,
	P_BECOME_SYNC_TARGET  = 0x05,
	P_BECOME_SYNC_SOURCE  = 0x06,
	P_UNPLUG_REMOTE	      = 0x07, /* Used at various times to hint the peer */
	P_DATA_REQUEST	      = 0x08, /* Used to ask for a data block */
	P_RS_DATA_REQUEST     = 0x09, /* Used to ask for a data block for resync */
	P_SYNC_PARAM	      = 0x0a,
	P_PROTOCOL	      = 0x0b,
	P_UUIDS		      = 0x0c,
	P_SIZES		      = 0x0d,
	P_STATE		      = 0x0e,
	P_SYNC_UUID	      = 0x0f,
	P_AUTH_CHALLENGE      = 0x10,
	P_AUTH_RESPONSE	      = 0x11,
	P_STATE_CHG_REQ	      = 0x12,

	/* (meta socket) */
	P_PING		      = 0x13,
	P_PING_ACK	      = 0x14,
	P_RECV_ACK	      = 0x15, /* Used in protocol B */
	P_WRITE_ACK	      = 0x16, /* Used in protocol C */
	P_RS_WRITE_ACK	      = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
	P_SUPERSEDED	      = 0x18, /* Used in proto C, two-primaries conflict detection */
	P_NEG_ACK	      = 0x19, /* Sent if local disk is unusable */
	P_NEG_DREPLY	      = 0x1a, /* Local disk is broken... */
	P_NEG_RS_DREPLY	      = 0x1b, /* Local disk is broken... */
	P_BARRIER_ACK	      = 0x1c,
	P_STATE_CHG_REPLY     = 0x1d,

	/* "new" commands, no longer fitting into the ordering scheme above */

	P_OV_REQUEST	      = 0x1e, /* data socket */
	P_OV_REPLY	      = 0x1f,
	P_OV_RESULT	      = 0x20, /* meta socket */
	P_CSUM_RS_REQUEST     = 0x21, /* data socket */
	P_RS_IS_IN_SYNC	      = 0x22, /* meta socket */
	P_SYNC_PARAM89	      = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
	P_COMPRESSED_BITMAP   = 0x24, /* compressed or otherwise encoded bitmap transfer */
	/* P_CKPT_FENCE_REQ      = 0x25, * currently reserved for protocol D */
	/* P_CKPT_DISABLE_REQ    = 0x26, * currently reserved for protocol D */
	P_DELAY_PROBE         = 0x27, /* is used on BOTH sockets */
	P_OUT_OF_SYNC         = 0x28, /* Mark as out of sync (Outrunning), data socket */
	P_RS_CANCEL           = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
	P_CONN_ST_CHG_REQ     = 0x2a, /* data sock: Connection wide state request */
	P_CONN_ST_CHG_REPLY   = 0x2b, /* meta sock: Connection wide state request reply */
	P_RETRY_WRITE	      = 0x2c, /* Protocol C: retry conflicting write request */
	P_PROTOCOL_UPDATE     = 0x2d, /* data sock: is used in established connections */
	/* 0x2e to 0x30 reserved, used in drbd 9 */

	/* REQ_OP_DISCARD. We used "discard" in different contexts before,
	 * which is why I chose TRIM here, to disambiguate. */
	P_TRIM                = 0x31,

	/* Only use these two if both support FF_THIN_RESYNC */
	P_RS_THIN_REQ         = 0x32, /* Request a block for resync or reply P_RS_DEALLOCATED */
	P_RS_DEALLOCATED      = 0x33, /* Contains only zeros on sync source node */

	/* REQ_WRITE_SAME.
	 * On a receiving side without REQ_WRITE_SAME,
	 * we may fall back to an open-coded loop instead. */
	P_WSAME               = 0x34,

	/* 0x35 already claimed in DRBD 9 */
	P_ZEROES              = 0x36, /* data sock: zero-out, WRITE_ZEROES */

	/* 0x40 .. 0x48 already claimed in DRBD 9 */

	P_MAY_IGNORE	      = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
	P_MAX_OPT_CMD	      = 0x101,

	/* special command ids for handshake */

	P_INITIAL_META	      = 0xfff1, /* First packet on the meta socket */
	P_INITIAL_DATA	      = 0xfff2, /* First packet on the data socket */

	P_CONNECTION_FEATURES = 0xfffe	/* FIXED for the next century! */
};
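
/*
 * Illustrative sketch, not part of the original header: the comment on
 * P_MAY_IGNORE above suggests how a receiver can treat unknown optional
 * commands -- a command value above P_MAY_IGNORE may be skipped rather than
 * tearing down the connection (P_MAX_OPT_CMD appears to bound that optional
 * range).  The helper name is an assumption made up for illustration.
 */
static inline int example_packet_may_be_ignored(enum drbd_packet cmd)
{
	return cmd > P_MAY_IGNORE && cmd < P_MAX_OPT_CMD;
}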

#ifndef __packed
#define __packed __attribute__((packed))
#endif

/* This is the layout for a packet on the wire.
 * The byte order is network byte order
 *     (except the block_id and barrier fields;
 *	these are pointers to local structs
 *	and have no relevance for the partner,
 *	which just echoes them as received.)
 *
 * NOTE that the payload starts at a long-aligned offset,
 * regardless of 32 or 64 bit arch!
 */
struct p_header80 {
	u32	  magic;
	u16	  command;
	u16	  length;	/* bytes of data after this header */
} __packed;

/* Header for big packets, used for data packets exceeding 64 KiB */
struct p_header95 {
	u16	  magic;	/* use DRBD_MAGIC_BIG here */
	u16	  command;
	u32	  length;
} __packed;

struct p_header100 {
	u32	  magic;
	u16	  volume;
	u16	  command;
	u32	  length;
	u32	  pad;
} __packed;
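
/*
 * Illustrative sketch, not part of the original header: how a sender might
 * fill the current (protocol 100) header in network byte order before the
 * payload.  DRBD_MAGIC_100 is defined elsewhere in the tree, not in this
 * header; the helper name and parameters are assumptions made up for
 * illustration, and the byte-order helpers come from <asm/byteorder.h>.
 */
static inline void example_prepare_header100(struct p_header100 *h, u16 volume,
					     u16 cmd, u32 payload_len)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC_100);
	h->volume  = cpu_to_be16(volume);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(payload_len);	/* bytes of data after this header */
	h->pad     = 0;
	/* sizeof(*h) == 16, so the payload starts long-aligned as noted above */
}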

/* These defines must not be changed without changing the protocol version.
 * New defines may only be introduced together with a protocol version bump
 * or new protocol feature flags.
 */
#define DP_HARDBARRIER	      1 /* no longer used */
#define DP_RW_SYNC	      2 /* equals REQ_SYNC    */
#define DP_MAY_SET_IN_SYNC    4
#define DP_UNPLUG             8 /* not used anymore   */
#define DP_FUA               16 /* equals REQ_FUA     */
#define DP_FLUSH             32 /* equals REQ_PREFLUSH   */
#define DP_DISCARD           64 /* equals REQ_OP_DISCARD */
#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
#define DP_SEND_WRITE_ACK   256 /* This is a proto C write request */
#define DP_WSAME            512 /* equiv. REQ_WRITE_SAME */
#define DP_ZEROES          1024 /* equiv. REQ_OP_WRITE_ZEROES */

/* possible combinations:
 * REQ_OP_WRITE_ZEROES:  DP_DISCARD | DP_ZEROES
 * REQ_OP_WRITE_ZEROES + REQ_NOUNMAP: DP_ZEROES
 */
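
/*
 * Illustrative sketch, not part of the original header, of the combinations
 * listed just above: picking dp_flags for a zero-out request on the sending
 * side.  The helper and its may_unmap parameter are assumptions made up for
 * illustration.
 */
static inline u32 example_wzeroes_dp_flags(bool may_unmap)
{
	if (may_unmap)		/* plain REQ_OP_WRITE_ZEROES: unmapping allowed */
		return DP_DISCARD | DP_ZEROES;
	return DP_ZEROES;	/* REQ_OP_WRITE_ZEROES + REQ_NOUNMAP: just zero */
}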

struct p_data {
	u64	    sector;    /* 64 bits sector number */
	u64	    block_id;  /* to identify the request in protocol B&C */
	u32	    seq_num;
	u32	    dp_flags;
} __packed;

struct p_trim {
	struct p_data p_data;
	u32	    size;	/* == bio->bi_size */
} __packed;

struct p_wsame {
	struct p_data p_data;
	u32           size;     /* == bio->bi_size */
} __packed;

/*
 * commands which share a struct:
 *  p_block_ack:
 *   P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
 *   P_SUPERSEDED (proto C, two-primaries conflict detection)
 *  p_block_req:
 *   P_DATA_REQUEST, P_RS_DATA_REQUEST
 */
struct p_block_ack {
	u64	    sector;
	u64	    block_id;
	u32	    blksize;
	u32	    seq_num;
} __packed;

struct p_block_req {
	u64 sector;
	u64 block_id;
	u32 blksize;
	u32 pad;	/* pad to a multiple of 8 bytes */
} __packed;

/*
 * commands with their own struct for additional fields:
 *   P_CONNECTION_FEATURES
 *   P_BARRIER
 *   P_BARRIER_ACK
 *   P_SYNC_PARAM
 *   ReportParams
 */

/* supports TRIM/DISCARD on the "wire" protocol */
#define DRBD_FF_TRIM 1

/* Detect all-zeros during resync, and rather TRIM/UNMAP/DISCARD those blocks
 * instead of fully allocating a supposedly thin volume on initial resync */
#define DRBD_FF_THIN_RESYNC 2

/* supports REQ_WRITE_SAME on the "wire" protocol.
 * Note: this flag is overloaded;
 * its presence also
 *   - indicates support for 128 MiB "batch bios",
 *     max discard size of 128 MiB
 *     instead of 4 MiB before that.
 *   - indicates that we exchange additional settings in p_sizes
 *     drbd_send_sizes()/receive_sizes()
 */
#define DRBD_FF_WSAME 4

/* supports REQ_OP_WRITE_ZEROES on the "wire" protocol.
 *
 * We used to map that to "discard" on the sending side, and if we cannot
 * guarantee that discard zeroes data, the receiving side would map discard
 * back to zero-out.
 *
 * With the introduction of REQ_OP_WRITE_ZEROES,
 * we started to use that for both WRITE_ZEROES and DISCARDS,
 * hoping that WRITE_ZEROES would "do what we want",
 * UNMAP if possible, zero-out the rest.
 *
 * The example scenario is some LVM "thin" backend.
 *
 * While an un-allocated block on dm-thin reads as zeroes, on a dm-thin
 * with "skip_block_zeroing=true", after a partial block write allocated
 * that block, that same block may well map "undefined old garbage" from
 * the backends on LBAs that have not yet been written to.
 *
 * If we cannot distinguish between zero-out and discard on the receiving
 * side, then to avoid "undefined old garbage" popping up randomly at later
 * times on supposedly zero-initialized blocks, we'd need to map all discards
 * to zero-out on the receiving side.  But that would potentially do a full
 * alloc on thinly provisioned backends, even when the expectation was to
 * unmap/trim/discard/de-allocate.
 *
 * We need to distinguish on the protocol level whether we need to guarantee
 * zeroes (and thus use zero-out, potentially doing the mentioned full-alloc),
 * or if we want to put the emphasis on discard, and only do a "best effort
 * zeroing" (by "discarding" blocks aligned to discard-granularity, and zeroing
 * only potential unaligned head and tail clippings), to at least *try* to
 * avoid "false positives" in an online-verify later, hoping that someone
 * set skip_block_zeroing=false.
 */
#define DRBD_FF_WZEROES 8
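
/*
 * Illustrative sketch, not part of the original header, of the distinction
 * described above from the receiver's point of view: P_ZEROES must guarantee
 * zeroed data (zero-out, possibly allocating), while P_TRIM puts the emphasis
 * on de-allocation.  Without DRBD_FF_WZEROES the two cases cannot be told
 * apart, so a receiver that must guarantee zeroes has to fall back to
 * zero-out.  The helper is an assumption made up for illustration.
 */
static inline bool example_receiver_must_zero_out(enum drbd_packet cmd,
						  u32 agreed_features)
{
	if (!(agreed_features & DRBD_FF_WZEROES))
		return true;		/* cannot distinguish; play it safe */
	return cmd == P_ZEROES;		/* P_TRIM stays a best-effort discard */
}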


struct p_connection_features {
	u32 protocol_min;
	u32 feature_flags;
	u32 protocol_max;

	/* Should be more than enough for future enhancements.
	 * For now, feature_flags and the reserved array shall be zero.
	 */

	u32 _pad;
	u64 reserved[7];
} __packed;
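
/*
 * Illustrative sketch, not part of the original header: the DRBD_FF_* flags
 * above are only used when both sides support them (see the FF_THIN_RESYNC
 * and DRBD_FF_WSAME comments), so a plausible way to derive the usable set
 * from the P_CONNECTION_FEATURES handshake is the intersection of what both
 * sides announced.  The helper is an assumption made up for illustration.
 */
static inline u32 example_agreed_features(u32 my_features, u32 peer_features)
{
	return my_features & peer_features;
}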

struct p_barrier {
	u32 barrier;	/* barrier number _handle_ only */
	u32 pad;	/* pad to a multiple of 8 bytes */
} __packed;

struct p_barrier_ack {
	u32 barrier;
	u32 set_size;
} __packed;

struct p_rs_param {
	u32 resync_rate;

	/* Since protocol version 88 and higher. */
	char verify_alg[];
} __packed;

struct p_rs_param_89 {
	u32 resync_rate;
	/* protocol version 89: */
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
} __packed;

struct p_rs_param_95 {
	u32 resync_rate;
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
	u32 c_plan_ahead;
	u32 c_delay_target;
	u32 c_fill_target;
	u32 c_max_rate;
} __packed;

enum drbd_conn_flags {
	CF_DISCARD_MY_DATA = 1,
	CF_DRY_RUN = 2,
};

struct p_protocol {
	u32 protocol;
	u32 after_sb_0p;
	u32 after_sb_1p;
	u32 after_sb_2p;
	u32 conn_flags;
	u32 two_primaries;

	/* Since protocol version 87 and higher. */
	char integrity_alg[];

} __packed;

struct p_uuids {
	u64 uuid[UI_EXTENDED_SIZE];
} __packed;

struct p_rs_uuid {
	u64	    uuid;
} __packed;

/* optional queue_limits if (agreed_features & DRBD_FF_WSAME)
 * see also struct queue_limits, as of late 2015 */
struct o_qlim {
	/* we don't need it yet, but we may as well communicate it now */
	u32 physical_block_size;

	/* the original in struct queue_limits is unsigned short,
	 * but I'd have to put in padding anyway. */
	u32 logical_block_size;

	/* One incoming bio becomes one DRBD request,
	 * which may be translated to several bios on the receiving side.
	 * We don't need to communicate chunk/boundary/segment ... limits.
	 */

	/* various IO hints may be useful with "diskless client" setups */
	u32 alignment_offset;
	u32 io_min;
	u32 io_opt;

	/* We may need to communicate integrity stuff at some point,
	 * but let's not get ahead of ourselves. */

	/* Backend discard capabilities.
	 * Receiving side uses "blkdev_issue_discard()", no need to communicate
	 * more specifics.  If the backend cannot do discards, the DRBD peer
	 * may fall back to blkdev_issue_zeroout().
	 */
	u8 discard_enabled;
	u8 discard_zeroes_data;
	u8 write_same_capable;
	u8 _pad;
} __packed;

struct p_sizes {
	u64	    d_size;  /* size of disk */
	u64	    u_size;  /* user-requested size */
	u64	    c_size;  /* current exported size */
	u32	    max_bio_size;  /* Maximal size of a BIO */
	u16	    queue_order_type;  /* not yet implemented in DRBD */
	u16	    dds_flags; /* use enum dds_flags here. */

	/* optional queue_limits if (agreed_features & DRBD_FF_WSAME) */
	struct o_qlim qlim[];
} __packed;
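
/*
 * Illustrative sketch, not part of the original header: the trailing o_qlim
 * of p_sizes is only sent when (agreed_features & DRBD_FF_WSAME), so the
 * payload length of a P_SIZES packet differs accordingly.  The helper is an
 * assumption made up for illustration.
 */
static inline size_t example_p_sizes_payload_len(u32 agreed_features)
{
	size_t len = sizeof(struct p_sizes);

	if (agreed_features & DRBD_FF_WSAME)
		len += sizeof(struct o_qlim);
	return len;
}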

struct p_state {
	u32	    state;
} __packed;

struct p_req_state {
	u32	    mask;
	u32	    val;
} __packed;

struct p_req_state_reply {
	u32	    retcode;
} __packed;

struct p_drbd06_param {
	u64	  size;
	u32	  state;
	u32	  blksize;
	u32	  protocol;
	u32	  version;
	u32	  gen_cnt[5];
	u32	  bit_map_gen[5];
} __packed;

struct p_block_desc {
	u64 sector;
	u32 blksize;
	u32 pad;	/* pad to a multiple of 8 bytes */
} __packed;

/* Valid values for the encoding field.
 * Bump the protocol version when changing this. */
enum drbd_bitmap_code {
	/* RLE_VLI_Bytes = 0,
	 * and other bit variants had been defined during
	 * algorithm evaluation. */
	RLE_VLI_Bits = 2,
};

struct p_compressed_bm {
	/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
	 * (encoding & 0x80): polarity (set/unset) of the first run length
	 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
	 * used to pad up to head.length bytes
	 */
	u8 encoding;

	u8 code[];
} __packed;
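
/*
 * Illustrative sketch, not part of the original header: decoding the
 * p_compressed_bm encoding byte exactly as documented in the comment above.
 * The helper names are assumptions made up for illustration.
 */
static inline enum drbd_bitmap_code example_bm_encoding(const struct p_compressed_bm *p)
{
	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}

static inline int example_bm_pad_bits(const struct p_compressed_bm *p)
{
	return (p->encoding >> 4) & 0x07;
}

static inline int example_bm_first_run_is_set(const struct p_compressed_bm *p)
{
	return (p->encoding & 0x80) != 0;
}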

struct p_delay_probe93 {
	u32     seq_num; /* sequence number to match the two probe packets */
	u32     offset;  /* usecs the probe was sent after the reference time point */
} __packed;

/*
 * Bitmap packets need to fit within a single page on the sender and receiver,
 * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
 */
#define DRBD_SOCKET_BUFFER_SIZE 4096
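
/*
 * Illustrative worked example, not part of the original header: assuming the
 * current p_header100 framing, a single bitmap packet can then carry at most
 * DRBD_SOCKET_BUFFER_SIZE - sizeof(struct p_header100) = 4096 - 16 = 4080
 * bytes of (possibly compressed) bitmap payload.  The define name is an
 * assumption made up for illustration.
 */
#define EXAMPLE_BM_PACKET_PAYLOAD \
	(DRBD_SOCKET_BUFFER_SIZE - sizeof(struct p_header100))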

#endif  /* __DRBD_PROTOCOL_H */