Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

net/smc/smc.h (blame: every line last touched in commit 8f3ce5b39 by kx, 2023-10-28 12:00:06 +0300):

/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Definitions for the SMC module (socket related)
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#ifndef __SMC_H
#define __SMC_H

#include <linux/socket.h>
#include <linux/types.h>
#include <linux/compiler.h> /* __aligned */
#include <net/sock.h>

#include "smc_ib.h"

#define SMC_V1		1		/* SMC version V1 */
#define SMC_V2		2		/* SMC version V2 */
#define SMC_RELEASE	0

#define SMCPROTO_SMC		0	/* SMC protocol, IPv4 */
#define SMCPROTO_SMC6		1	/* SMC protocol, IPv6 */

#define SMC_MAX_ISM_DEVS	8	/* max # of proposed non-native ISM
					 * devices
					 */

#define SMC_MAX_HOSTNAME_LEN	32
#define SMC_MAX_EID_LEN		32

extern struct proto smc_proto;
extern struct proto smc_proto6;

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

enum smc_state {		/* possible states of an SMC socket */
	SMC_ACTIVE	= 1,
	SMC_INIT	= 2,
	SMC_CLOSED	= 7,
	SMC_LISTEN	= 10,
	/* normal close */
	SMC_PEERCLOSEWAIT1	= 20,
	SMC_PEERCLOSEWAIT2	= 21,
	SMC_APPFINCLOSEWAIT	= 24,
	SMC_APPCLOSEWAIT1	= 22,
	SMC_APPCLOSEWAIT2	= 23,
	SMC_PEERFINCLOSEWAIT	= 25,
	/* abnormal close */
	SMC_PEERABORTWAIT	= 26,
	SMC_PROCESSABORT	= 27,
};

struct smc_link_group;

struct smc_wr_rx_hdr {	/* common prefix part of LLC and CDC to demultiplex */
	u8			type;
} __aligned(1);

struct smc_cdc_conn_state_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	peer_done_writing : 1;	/* Sending done indicator */
	u8	peer_conn_closed : 1;	/* Peer connection closed indicator */
	u8	peer_conn_abort : 1;	/* Abnormal close indicator */
	u8	reserved : 5;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 5;
	u8	peer_conn_abort : 1;
	u8	peer_conn_closed : 1;
	u8	peer_done_writing : 1;
#endif
};

struct smc_cdc_producer_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	write_blocked : 1;	/* Writing Blocked, no rx buf space */
	u8	urg_data_pending : 1;	/* Urgent Data Pending */
	u8	urg_data_present : 1;	/* Urgent Data Present */
	u8	cons_curs_upd_req : 1;	/* cursor update requested */
	u8	failover_validation : 1;/* message replay due to failover */
	u8	reserved : 3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 3;
	u8	failover_validation : 1;
	u8	cons_curs_upd_req : 1;
	u8	urg_data_present : 1;
	u8	urg_data_pending : 1;
	u8	write_blocked : 1;
#endif
};
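/* Editorial note on the two flag structs above: the member order is mirrored
 * between the __BIG_ENDIAN_BITFIELD and __LITTLE_ENDIAN_BITFIELD branches
 * because bitfield allocation order depends on the target ABI; declaring the
 * bits in reverse on little-endian keeps the CDC wire-format bit positions
 * identical on all architectures.
 */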

/* in host byte order */
union smc_host_cursor {	/* SMC cursor - an offset in an RMBE */
	struct {
		u16	reserved;
		u16	wrap;		/* window wrap sequence number */
		u32	count;		/* cursor (= offset) part */
	};
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		acurs;	/* for atomic processing */
#else
	u64			acurs;	/* for atomic processing */
#endif
} __aligned(8);
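/* Illustrative sketch, not part of this header: the union above overlays the
 * wrap/count pair with a single 64-bit word so that a cursor can be copied in
 * one atomic operation when the architecture provides atomic64_t (see the
 * KERNEL_HAS_ATOMIC64 probe earlier in this file). A hypothetical copy helper
 * could look like this; without atomic64 support the copy must instead be
 * guarded by a lock such as smc_connection.acurs_lock.
 */
static inline void smc_curs_copy_sketch(union smc_host_cursor *tgt,
					union smc_host_cursor *src)
{
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#else
	tgt->acurs = src->acurs;	/* caller must hold acurs_lock */
#endif
}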

/* in host byte order, except for flag bitfields in network byte order */
struct smc_host_cdc_msg {		/* Connection Data Control message */
	struct smc_wr_rx_hdr		common; /* .type = 0xFE */
	u8				len;	/* length = 44 */
	u16				seqno;	/* connection seq # */
	u32				token;	/* alert_token */
	union smc_host_cursor		prod;		/* producer cursor */
	union smc_host_cursor		cons;		/* consumer cursor,
							 * piggy backed "ack"
							 */
	struct smc_cdc_producer_flags	prod_flags;	/* conn. tx/rx status */
	struct smc_cdc_conn_state_flags	conn_state_flags; /* peer conn. status*/
	u8				reserved[18];
} __aligned(8);

enum smc_urg_state {
	SMC_URG_VALID	= 1,			/* data present */
	SMC_URG_NOTYET	= 2,			/* data pending */
	SMC_URG_READ	= 3,			/* data was already read */
};

struct smc_connection {
	struct rb_node		alert_node;
	struct smc_link_group	*lgr;		/* link group of connection */
	struct smc_link		*lnk;		/* assigned SMC-R link */
	u32			alert_token_local; /* unique conn. id */
	u8			peer_rmbe_idx;	/* from tcp handshake */
	int			peer_rmbe_size;	/* size of peer rx buffer */
	atomic_t		peer_rmbe_space;/* remaining free bytes in peer
						 * rmbe
						 */
	int			rtoken_idx;	/* idx to peer RMB rkey/addr */

	struct smc_buf_desc	*sndbuf_desc;	/* send buffer descriptor */
	struct smc_buf_desc	*rmb_desc;	/* RMBE descriptor */
	int			rmbe_size_short;/* compressed notation */
	int			rmbe_update_limit;
						/* lower limit for consumer
						 * cursor update
						 */

	struct smc_host_cdc_msg	local_tx_ctrl;	/* host byte order staging
						 * buffer for CDC msg send
						 * .prod cf. TCP snd_nxt
						 * .cons cf. TCP sends ack
						 */
	union smc_host_cursor	local_tx_ctrl_fin;
						/* prod crsr - confirmed by peer
						 */
	union smc_host_cursor	tx_curs_prep;	/* tx - prepared data
						 * snd_max..wmem_alloc
						 */
	union smc_host_cursor	tx_curs_sent;	/* tx - sent data
						 * snd_nxt ?
						 */
	union smc_host_cursor	tx_curs_fin;	/* tx - confirmed by peer
						 * snd-wnd-begin ?
						 */
	atomic_t		sndbuf_space;	/* remaining space in sndbuf */
	u16			tx_cdc_seq;	/* sequence # for CDC send */
	u16			tx_cdc_seq_fin;	/* sequence # - tx completed */
	spinlock_t		send_lock;	/* protect wr_sends */
	atomic_t		cdc_pend_tx_wr; /* number of pending tx CDC wqe
						 * - inc when post wqe,
						 * - dec on polled tx cqe
						 */
	wait_queue_head_t	cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr*/
	struct delayed_work	tx_work;	/* retry of smc_cdc_msg_send */
	u32			tx_off;		/* base offset in peer rmb */

	struct smc_host_cdc_msg	local_rx_ctrl;	/* filled during event_handl.
						 * .prod cf. TCP rcv_nxt
						 * .cons cf. TCP snd_una
						 */
	union smc_host_cursor	rx_curs_confirmed; /* confirmed to peer
						    * source of snd_una ?
						    */
	union smc_host_cursor	urg_curs;	/* points at urgent byte */
	enum smc_urg_state	urg_state;
	bool			urg_tx_pend;	/* urgent data staged */
	bool			urg_rx_skip_pend;
						/* indicate urgent oob data
						 * read, but previous regular
						 * data still pending
						 */
	char			urg_rx_byte;	/* urgent byte */
	atomic_t		bytes_to_rcv;	/* arrived data,
						 * not yet received
						 */
	atomic_t		splice_pending;	/* number of spliced bytes
						 * pending processing
						 */
#ifndef KERNEL_HAS_ATOMIC64
	spinlock_t		acurs_lock;	/* protect cursors */
#endif
	struct work_struct	close_work;	/* peer sent some closing */
	struct work_struct	abort_work;	/* abort the connection */
	struct tasklet_struct	rx_tsklet;	/* Receiver tasklet for SMC-D */
	u8			rx_off;		/* receive offset:
						 * 0 for SMC-R, 32 for SMC-D
						 */
	u64			peer_token;	/* SMC-D token of peer */
	u8			killed : 1;	/* abnormal termination */
	u8			out_of_sync : 1; /* out of sync with peer */
};

struct smc_sock {				/* smc sock container */
	struct sock		sk;
	struct socket		*clcsock;	/* internal tcp socket */
	void			(*clcsk_data_ready)(struct sock *sk);
						/* original data_ready fct. */
	struct smc_connection	conn;		/* smc connection */
	struct smc_sock		*listen_smc;	/* listen parent */
	struct work_struct	connect_work;	/* handle non-blocking connect*/
	struct work_struct	tcp_listen_work;/* handle tcp socket accepts */
	struct work_struct	smc_listen_work;/* prepare new accept socket */
	struct list_head	accept_q;	/* sockets to be accepted */
	spinlock_t		accept_q_lock;	/* protects accept_q */
	bool			use_fallback;	/* fallback to tcp */
	int			fallback_rsn;	/* reason for fallback */
	u32			peer_diagnosis; /* decline reason from peer */
	int			sockopt_defer_accept;
						/* sockopt TCP_DEFER_ACCEPT
						 * value
						 */
	u8			wait_close_tx_prepared : 1;
						/* shutdown wr or close
						 * started, waiting for unsent
						 * data to be sent
						 */
	u8			connect_nonblock : 1;
						/* non-blocking connect in
						 * flight
						 */
	struct mutex            clcsock_release_lock;
						/* protects clcsock of a listen
						 * socket
						 */
};
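/* smc_sk() below relies on 'struct sock sk' being the first member of
 * struct smc_sock, so the plain cast is equivalent to
 * container_of(sk, struct smc_sock, sk).
 */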

static inline struct smc_sock *smc_sk(const struct sock *sk)
{
	return (struct smc_sock *)sk;
}

extern struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
extern struct workqueue_struct	*smc_close_wq;	/* wq for close work */

#define SMC_SYSTEMID_LEN		8

extern u8	local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */

#define ntohll(x) be64_to_cpu(x)
#define htonll(x) cpu_to_be64(x)

/* convert a u32 value into network byte order, store it into a 3 byte field */
static inline void hton24(u8 *net, u32 host)
{
	__be32 t;

	t = cpu_to_be32(host);
	memcpy(net, ((u8 *)&t) + 1, 3);
}

/* convert a received 3 byte field into host byte order */
static inline u32 ntoh24(u8 *net)
{
	__be32 t = 0;

	memcpy(((u8 *)&t) + 1, net, 3);
	return be32_to_cpu(t);
}
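/* Illustrative sketch, not part of this header: round-tripping a value
 * through the 3-byte helpers above. SMC carries such 24-bit fields on the
 * wire (for example RoCE QP numbers in the CLC handshake messages); the
 * function and variable names below are hypothetical.
 */
static inline void smc_24bit_roundtrip_sketch(void)
{
	u8 wire[3];			/* 3-byte field in network byte order */
	u32 host = 0x0a0b0c;		/* only the low 24 bits can be stored */

	hton24(wire, host);		/* wire[] becomes { 0x0a, 0x0b, 0x0c } */
	WARN_ON(ntoh24(wire) != host);	/* the round trip restores the value */
}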

#ifdef CONFIG_XFRM
static inline bool using_ipsec(struct smc_sock *smc)
{
	return (smc->clcsock->sk->sk_policy[0] ||
		smc->clcsock->sk->sk_policy[1]) ? true : false;
}
#else
static inline bool using_ipsec(struct smc_sock *smc)
{
	return false;
}
#endif

struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock);
void smc_close_non_accepted(struct sock *sk);

#endif	/* __SMC_H */