/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2017, Microsoft Corporation.
 *
 * Author(s): Long Li <longli@microsoft.com>
 */
#ifndef _SMBDIRECT_H
#define _SMBDIRECT_H

#ifdef CONFIG_CIFS_SMB_DIRECT
#define cifs_rdma_enabled(server) ((server)->rdma)

#include "cifsglob.h"
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/mempool.h>

extern int rdma_readwrite_threshold;
extern int smbd_max_frmr_depth;
extern int smbd_keep_alive_interval;
extern int smbd_max_receive_size;
extern int smbd_max_fragmented_recv_size;
extern int smbd_max_send_size;
extern int smbd_send_credit_target;
extern int smbd_receive_credit_max;

enum keep_alive_status {
	KEEP_ALIVE_NONE,
	KEEP_ALIVE_PENDING,
	KEEP_ALIVE_SENT,
};

enum smbd_connection_status {
	SMBD_CREATED,
	SMBD_CONNECTING,
	SMBD_CONNECTED,
	SMBD_NEGOTIATE_FAILED,
	SMBD_DISCONNECTING,
	SMBD_DISCONNECTED,
	SMBD_DESTROYED
};

/*
 * The context for the SMBDirect transport.
 * Everything related to the transport is here. It has several logical parts:
 * 1. RDMA related structures
 * 2. SMBDirect connection parameters
 * 3. Memory registrations
 * 4. Receive and reassembly queues for the data receive path
 * 5. mempools for allocating packets
 */
struct smbd_connection {
	enum smbd_connection_status transport_status;

	/* RDMA related */
	struct rdma_cm_id *id;
	struct ib_qp_init_attr qp_attr;
	struct ib_pd *pd;
	struct ib_cq *send_cq, *recv_cq;
	struct ib_device_attr dev_attr;
	int ri_rc;
	struct completion ri_done;
	wait_queue_head_t conn_wait;
	wait_queue_head_t disconn_wait;

	struct completion negotiate_completion;
	bool negotiate_done;

	struct work_struct disconnect_work;
	struct work_struct post_send_credits_work;

	spinlock_t lock_new_credits_offered;
	int new_credits_offered;

	/* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
	int receive_credit_max;
	int send_credit_target;
	int max_send_size;
	int max_fragmented_recv_size;
	int max_fragmented_send_size;
	int max_receive_size;
	int keep_alive_interval;
	int max_readwrite_size;
	enum keep_alive_status keep_alive_requested;
	int protocol;
	atomic_t send_credits;
	atomic_t receive_credits;
	int receive_credit_target;
	int fragment_reassembly_remaining;

	/* Memory registrations */
	/* Maximum number of RDMA read/write outstanding on this connection */
	int responder_resources;
	/* Maximum number of SGEs in an RDMA write/read */
	int max_frmr_depth;
	/*
	 * If the payload is less than or equal to the threshold,
	 * use RDMA send/recv to carry the upper-layer I/O.
	 * If the payload is larger than the threshold,
	 * use RDMA read/write through memory registration for the I/O.
	 * (See the illustrative sketch after this struct definition.)
	 */
	int rdma_readwrite_threshold;
	enum ib_mr_type mr_type;
	struct list_head mr_list;
	spinlock_t mr_list_lock;
	/* The number of available MRs ready for memory registration */
	atomic_t mr_ready_count;
	atomic_t mr_used_count;
	wait_queue_head_t wait_mr;
	struct work_struct mr_recovery_work;
	/* Used by transport to wait until all MRs are returned */
	wait_queue_head_t wait_for_mr_cleanup;

	/* Activity accounting */
	atomic_t send_pending;
	wait_queue_head_t wait_send_pending;
	wait_queue_head_t wait_post_send;

	/* Receive queue */
	struct list_head receive_queue;
	int count_receive_queue;
	spinlock_t receive_queue_lock;

	struct list_head empty_packet_queue;
	int count_empty_packet_queue;
	spinlock_t empty_packet_queue_lock;

	wait_queue_head_t wait_receive_queues;

	/* Reassembly queue */
	struct list_head reassembly_queue;
	spinlock_t reassembly_queue_lock;
	wait_queue_head_t wait_reassembly_queue;

	/* total data length of reassembly queue */
	int reassembly_data_length;
	int reassembly_queue_length;
	/* the offset to the first buffer in the reassembly queue */
	int first_entry_offset;

	bool send_immediate;

	wait_queue_head_t wait_send_queue;

	/*
	 * Indicates whether we have received a full packet on the connection.
	 * This is used to identify the first SMBD packet of an assembled
	 * payload (SMB packet) in the reassembly queue, so we can return an
	 * RFC1002 length to the upper layer to indicate the length of the
	 * SMB packet received.
	 */
	bool full_packet_received;

	struct workqueue_struct *workqueue;
	struct delayed_work idle_timer_work;

	/* Memory pool for preallocating buffers */
	/* request pool for RDMA send */
	struct kmem_cache *request_cache;
	mempool_t *request_mempool;

	/* response pool for RDMA receive */
	struct kmem_cache *response_cache;
	mempool_t *response_mempool;

	/* for debug purposes */
	unsigned int count_get_receive_buffer;
	unsigned int count_put_receive_buffer;
	unsigned int count_reassembly_queue;
	unsigned int count_enqueue_reassembly_queue;
	unsigned int count_dequeue_reassembly_queue;
	unsigned int count_send_empty;
};
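
/*
 * Illustrative sketch only (the caller and variable names below are
 * hypothetical, shown to clarify how rdma_readwrite_threshold is meant to
 * be applied): payloads at or below the threshold travel inline in SMBD
 * data transfer packets via smbd_send(), while larger payloads are
 * registered with smbd_register_mr() so the peer can move them directly
 * with RDMA read/write.
 *
 *	if (payload_len <= info->rdma_readwrite_threshold)
 *		rc = smbd_send(server, 1, &rqst);
 *	else
 *		mr = smbd_register_mr(info, pages, num_pages, offset,
 *				      tailsz, writing, need_invalidate);
 */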

enum smbd_message_type {
	SMBD_NEGOTIATE_RESP,
	SMBD_TRANSFER_DATA,
};

/* Flag in smbd_data_transfer: the sender requests an immediate response (keep-alive) */
#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001

/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
struct smbd_negotiate_req {
	__le16 min_version;
	__le16 max_version;
	__le16 reserved;
	__le16 credits_requested;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;

/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
struct smbd_negotiate_resp {
	__le16 min_version;
	__le16 max_version;
	__le16 negotiated_version;
	__le16 reserved;
	__le16 credits_requested;
	__le16 credits_granted;
	__le32 status;
	__le32 max_readwrite_size;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;

/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
struct smbd_data_transfer {
	__le16 credits_requested;
	__le16 credits_granted;
	__le16 flags;
	__le16 reserved;
	__le32 remaining_data_length;
	__le32 data_offset;
	__le32 data_length;
	__le32 padding;
	__u8 buffer[];
} __packed;
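
/*
 * Worked example (values invented for illustration): a 20000-byte SMB
 * message sent with an 8192-byte payload limit per packet is carried in
 * three data transfer packets:
 *
 *	fragment	data_length	remaining_data_length
 *	1		8192		11808
 *	2		8192		3616
 *	3		3616		0
 *
 * data_offset gives the 8-byte-aligned offset of buffer[] from the start
 * of the packet; remaining_data_length reaching zero marks the final
 * fragment, which is how the receive path knows a reassembled payload is
 * complete.
 */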

/* The packet fields for a registered RDMA buffer */
struct smbd_buffer_descriptor_v1 {
	__le64 offset;
	__le32 token;
	__le32 length;
} __packed;

/* Default maximum number of SGEs in an RDMA send/recv */
#define SMBDIRECT_MAX_SGE 16
/* The context for an SMBD request */
struct smbd_request {
	struct smbd_connection *info;
	struct ib_cqe cqe;

	/* the SGE entries for this packet */
	struct ib_sge sge[SMBDIRECT_MAX_SGE];
	int num_sge;

	/* SMBD packet header follows this structure */
	u8 packet[];
};

/* The context for an SMBD response */
struct smbd_response {
	struct smbd_connection *info;
	struct ib_cqe cqe;
	struct ib_sge sge;

	enum smbd_message_type type;

	/* Link to receive queue or reassembly queue */
	struct list_head list;

	/* Indicate if this is the 1st packet of a payload */
	bool first_segment;

	/* SMBD packet header and payload follow this structure */
	u8 packet[];
};

/* Create a SMBDirect session */
struct smbd_connection *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr);

/* Reconnect SMBDirect session */
int smbd_reconnect(struct TCP_Server_Info *server);
/* Destroy SMBDirect session */
void smbd_destroy(struct TCP_Server_Info *server);

/* Interface for carrying upper layer I/O through send/recv */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
int smbd_send(struct TCP_Server_Info *server,
	int num_rqst, struct smb_rqst *rqst);
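
/*
 * Typical call sequence from the transport layer (sketch only; the error
 * handling and locking of the real callers are omitted):
 *
 *	server->smbd_conn = smbd_get_connection(server,
 *				(struct sockaddr *)&server->dstaddr);
 *	if (!server->smbd_conn)
 *		return -ENOENT;
 *	...
 *	rc = smbd_send(server, num_rqst, rqst);		// transmit a request
 *	rc = smbd_recv(server->smbd_conn, &msg);	// pull reassembled data
 *	...
 *	rc = smbd_reconnect(server);			// after a session drop
 *	smbd_destroy(server);				// final teardown
 */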

enum mr_state {
	MR_READY,
	MR_REGISTERED,
	MR_INVALIDATED,
	MR_ERROR
};

struct smbd_mr {
	struct smbd_connection *conn;
	struct list_head list;
	enum mr_state state;
	struct ib_mr *mr;
	struct scatterlist *sgl;
	int sgl_count;
	enum dma_data_direction dir;
	union {
		struct ib_reg_wr wr;
		struct ib_send_wr inv_wr;
	};
	struct ib_cqe cqe;
	bool need_invalidate;
	struct completion invalidate_done;
};

/* Interfaces to register and deregister MR for RDMA read/write */
struct smbd_mr *smbd_register_mr(
	struct smbd_connection *info, struct page *pages[], int num_pages,
	int offset, int tailsz, bool writing, bool need_invalidate);
int smbd_deregister_mr(struct smbd_mr *mr);
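
/*
 * Sketch of the direct-placement path (hypothetical caller, shown only to
 * illustrate how the two calls pair up): register the pages backing a large
 * read/write, describe the region to the server with a
 * smbd_buffer_descriptor_v1 built from the MR, then deregister once the
 * server has completed the RDMA transfer.
 *
 *	struct smbd_buffer_descriptor_v1 v1;
 *
 *	mr = smbd_register_mr(info, pages, num_pages, offset, tailsz,
 *			      writing, need_invalidate);
 *	if (!mr)
 *		return -EAGAIN;
 *	v1.offset = cpu_to_le64(mr->mr->iova);
 *	v1.token  = cpu_to_le32(mr->mr->rkey);
 *	v1.length = cpu_to_le32(mr->mr->length);
 *	... send the request carrying v1 and wait for the response ...
 *	smbd_deregister_mr(mr);
 */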

#else
#define cifs_rdma_enabled(server) 0
struct smbd_connection {};
static inline void *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
static inline void smbd_destroy(struct TCP_Server_Info *server) {}
static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
#endif

#endif