// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)

/*
 * Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
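
/*
 * Usage sketch (illustrative only, not compiled into the driver): with mode
 * 0644 the parameter can be set at load time or changed later through sysfs.
 * The exact module/parameter paths below are assumptions based on
 * KBUILD_MODNAME and the declaration above:
 *
 *   modprobe nvmet-tcp so_priority=6
 *   echo 6 > /sys/module/nvmet_tcp/parameters/so_priority
 *
 * The chosen value is expected to be applied to accepted sockets as their
 * SO_PRIORITY / sk_priority elsewhere in this file, outside this excerpt.
 */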

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64
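
/*
 * Note (descriptive assumption, based on naming and typical use elsewhere in
 * this file): the RECV/SEND budgets bound how many receive and send
 * operations a single pass attempts, and IO_WORK_BUDGET bounds the overall
 * per-invocation work of the queue's io_work handler (not shown in this
 * excerpt) before it requeues itself, so one busy queue does not starve
 * others sharing nvmet_tcp_wq.
 */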

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue *queue;
	struct nvmet_req req;

	struct nvme_tcp_cmd_pdu *cmd_pdu;
	struct nvme_tcp_rsp_pdu *rsp_pdu;
	struct nvme_tcp_data_pdu *data_pdu;
	struct nvme_tcp_r2t_pdu *r2t_pdu;

	u32 rbytes_done;
	u32 wbytes_done;

	u32 pdu_len;
	u32 pdu_recv;
	int sg_idx;
	int nr_mapped;
	struct msghdr recv_msg;
	struct kvec *iov;
	u32 flags;

	struct list_head entry;
	struct llist_node lentry;

	/* send state */
	u32 offset;
	struct scatterlist *cur_sg;
	enum nvmet_tcp_send_state state;

	__le32 exp_ddgst;
	__le32 recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket *sock;
	struct nvmet_tcp_port *port;
	struct work_struct io_work;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd *cmds;
	unsigned int nr_cmds;
	struct list_head free_list;
	struct llist_head resp_list;
	struct list_head resp_send_list;
	int send_list_len;
	struct nvmet_tcp_cmd *snd_cmd;

	/* recv state */
	int offset;
	int left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd *cmd;
	union nvme_tcp_pdu pdu;

	/* digest state */
	bool hdr_digest;
	bool data_digest;
	struct ahash_request *snd_hash;
	struct ahash_request *rcv_hash;

	spinlock_t state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage sockaddr;
	struct sockaddr_storage sockaddr_peer;
	struct work_struct release_work;

	int idx;
	struct list_head queue_list;

	struct nvmet_tcp_cmd connect;

	struct page_frag_cache pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket *sock;
	struct work_struct accept_work;
	struct nvmet_port *nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}

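/*
 * Naming note: "data in" below means data inbound to the target (host write
 * data received over the socket), while "data out" means data outbound to
 * the host (read data transmitted as C2H DATA PDUs).
 */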
static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
	return queue->sock->sk->sk_incoming_cpu;
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

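/*
 * Compute the CRC32C header digest over @pdu and store the 4-byte result
 * immediately after the header, i.e. at pdu + len, which is where the HDGST
 * field sits on the wire.
 */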
static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
		void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	sg = &cmd->req.sg[cmd->sg_idx];

	for (i = 0; i < cmd->nr_mapped; i++)
		kunmap(sg_page(&sg[i]));
}

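/*
 * Build a kvec array over the kmap()'ed scatterlist pages backing this
 * command, starting at the offset corresponding to rbytes_done, and point
 * recv_msg.msg_iter at it so the PDU data can be received directly into the
 * request buffers.
 */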
static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct kvec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;

	length = cmd->pdu_len;
	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
		iov->iov_len = iov_len;

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		sg_offset = 0;
	}

	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
		cmd->nr_mapped, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}

static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	sgl_free(cmd->req.sg);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist sg;
	struct kvec *iov;
	int i;

	crypto_ahash_init(hash);
	for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
		sg_init_one(&sg, iov->iov_base, iov->iov_len);
		ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
		crypto_ahash_update(hash);
	}
	ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
	crypto_ahash_final(hash);
}

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

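/*
 * Completions are queued from response context onto the lock-free resp_list
 * (llist_add() in nvmet_tcp_queue_response()) and spliced here, from io_work
 * context, onto the ordered resp_send_list that the send path consumes.
 */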
static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvmet_tcp_cmd *cmd;

	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}
}

static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}

static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	u32 len;

	if (unlikely(cmd == queue->cmd)) {
		sgl = &cmd->req.cmd->common.dptr.sgl;
		len = le32_to_cpu(sgl->length);

		/*
		 * Wait for inline data before processing the response.
		 * Avoid using helpers, this might happen before
		 * nvmet_req_init is completed.
		 */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size &&
		    nvme_is_write(cmd->req.cmd))
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}

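/*
 * The nvmet_try_send_* helpers below share a convention: they return 1 when
 * their element has been fully transmitted, -EAGAIN when the socket accepted
 * only part of it (cmd->offset records the progress for the next attempt),
 * and 0 or a negative error straight from the socket call otherwise.
 */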
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
			offset_in_page(cmd->data_pdu) + cmd->offset,
			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset = 0;
	return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;
		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, flags);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled) {
		kfree(cmd->iov);
		sgl_free(cmd->req.sg);
	}

	return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = left
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

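/*
 * Advance one command through the send state machine. The if-chain below
 * intentionally falls through consecutive states, so a single call can emit
 * the data PDU, the data, the data digest and the response back to back as
 * long as the socket keeps accepting bytes.
 */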
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) bool last_in_batch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) cmd = nvmet_tcp_fetch_cmd(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (unlikely(!cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) ret = nvmet_try_send_data_pdu(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) goto done_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (cmd->state == NVMET_TCP_SEND_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) ret = nvmet_try_send_data(cmd, last_in_batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) goto done_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (cmd->state == NVMET_TCP_SEND_DDGST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) ret = nvmet_try_send_ddgst(cmd, last_in_batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) goto done_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (cmd->state == NVMET_TCP_SEND_R2T) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ret = nvmet_try_send_r2t(cmd, last_in_batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) goto done_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (cmd->state == NVMET_TCP_SEND_RESPONSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) ret = nvmet_try_send_response(cmd, last_in_batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) done_send:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (ret == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) int budget, int *sends)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) for (i = 0; i < budget; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) nvmet_tcp_socket_error(queue, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) } else if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) (*sends)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) queue->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) queue->left = sizeof(struct nvme_tcp_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) queue->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) queue->rcv_state = NVMET_TCP_RECV_PDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) ahash_request_free(queue->rcv_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) ahash_request_free(queue->snd_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) crypto_free_ahash(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct crypto_ahash *tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (IS_ERR(tfm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) return PTR_ERR(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (!queue->snd_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) goto free_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (!queue->rcv_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) goto free_snd_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) free_snd_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ahash_request_free(queue->snd_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) free_tfm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) crypto_free_ahash(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
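/*
 * Handle the connection initialization request (ICReq): validate its
 * length, PFV and HPDA, record the negotiated header/data digest
 * settings, and reply with an ICResp.  On success the queue goes live
 * and is armed to receive the first command PDU.
 */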
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct msghdr msg = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct kvec iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) pr_err("bad nvme-tcp pdu length (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) le32_to_cpu(icreq->hdr.plen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (icreq->pfv != NVME_TCP_PFV_1_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (icreq->hpda != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) pr_err("queue %d: unsupported hpda %d\n", queue->idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) icreq->hpda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (queue->hdr_digest || queue->data_digest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ret = nvmet_tcp_alloc_crypto(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) memset(icresp, 0, sizeof(*icresp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) icresp->hdr.type = nvme_tcp_icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) icresp->hdr.hlen = sizeof(*icresp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) icresp->hdr.pdo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 	icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) icresp->cpda = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (queue->hdr_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) iov.iov_base = icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) iov.iov_len = sizeof(*icresp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) goto free_crypto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) queue->state = NVMET_TCP_Q_LIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) nvmet_prepare_receive_pdu(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) free_crypto:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (queue->hdr_digest || queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) nvmet_tcp_free_crypto(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
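/*
 * nvmet_req_init() failed for this command.  If it is a write carrying
 * in-capsule data we must still drain that data from the socket to stay
 * in sync with the byte stream, so set up the data receive path and
 * mark the command as failed; otherwise just expect the next PDU.
 */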
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (!nvme_is_write(cmd->req.cmd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) data_len > cmd->req.port->inline_data_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) nvmet_prepare_receive_pdu(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) ret = nvmet_tcp_map_data(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) pr_err("queue %d: failed to map data\n", queue->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) nvmet_tcp_fatal_error(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) queue->rcv_state = NVMET_TCP_RECV_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) nvmet_tcp_map_pdu_iovec(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) cmd->flags |= NVMET_TCP_F_INIT_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
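/*
 * Handle an H2CData PDU sent in response to an earlier R2T: look up the
 * command by transfer tag, verify that the data offset matches what has
 * been received so far, and switch the queue into data receive state.
 */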
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct nvme_tcp_data_pdu *data = &queue->pdu.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct nvmet_tcp_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (likely(queue->nr_cmds))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) cmd = &queue->cmds[data->ttag];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) cmd = &queue->connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) pr_err("ttag %u unexpected data offset %u (expected %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) data->ttag, le32_to_cpu(data->data_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) cmd->rbytes_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* FIXME: use path and transport errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) nvmet_req_complete(&cmd->req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) NVME_SC_INVALID_FIELD | NVME_SC_DNR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) cmd->pdu_len = le32_to_cpu(data->data_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) cmd->pdu_recv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) nvmet_tcp_map_pdu_iovec(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) queue->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) queue->rcv_state = NVMET_TCP_RECV_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
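/*
 * A complete PDU header has been received and its digests verified.
 * Dispatch it: ICReq while still connecting, H2CData to the data path,
 * and command capsules to a freshly allocated command that is either
 * executed, armed for inline data, or answered with an R2T.
 */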
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct nvmet_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (hdr->type != nvme_tcp_icreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) pr_err("unexpected pdu type (%d) before icreq\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) hdr->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) nvmet_tcp_fatal_error(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return nvmet_tcp_handle_icreq(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (hdr->type == nvme_tcp_h2c_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) ret = nvmet_tcp_handle_h2c_data_pdu(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) queue->cmd = nvmet_tcp_get_cmd(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (unlikely(!queue->cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /* This should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) 		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) queue->idx, queue->nr_cmds, queue->send_list_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) nvme_cmd->common.opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) nvmet_tcp_fatal_error(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) req = &queue->cmd->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) &queue->nvme_sq, &nvmet_tcp_ops))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) req->cmd, req->cmd->common.command_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) req->cmd->common.opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) le32_to_cpu(req->cmd->common.dptr.sgl.length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ret = nvmet_tcp_map_data(queue->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) pr_err("queue %d: failed to map data\n", queue->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (nvmet_tcp_has_inline_data(queue->cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) nvmet_tcp_fatal_error(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) nvmet_req_complete(req, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (nvmet_tcp_need_data_in(queue->cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (nvmet_tcp_has_inline_data(queue->cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) queue->rcv_state = NVMET_TCP_RECV_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) nvmet_tcp_map_pdu_iovec(queue->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /* send back R2T */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) nvmet_tcp_queue_response(&queue->cmd->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) queue->cmd->req.execute(&queue->cmd->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) nvmet_prepare_receive_pdu(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static const u8 nvme_tcp_pdu_sizes[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static inline u8 nvmet_tcp_pdu_size(u8 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) size_t idx = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) nvme_tcp_pdu_sizes[idx]) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) nvme_tcp_pdu_sizes[idx] : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static inline bool nvmet_tcp_pdu_valid(u8 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) case nvme_tcp_icreq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) case nvme_tcp_cmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) case nvme_tcp_h2c_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /* fallthru */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
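/*
 * Non-blocking receive of the current PDU header: read the common
 * header first, validate the PDU type and header length, then read the
 * rest of the header (and header digest) before dispatching the PDU.
 */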
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct kvec iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) recv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) iov.iov_base = (void *)&queue->pdu + queue->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) iov.iov_len = queue->left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) iov.iov_len, msg.msg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (unlikely(len < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) queue->offset += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) queue->left -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (queue->left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) u8 hdgst = nvmet_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) pr_err("unexpected pdu type %d\n", hdr->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) nvmet_tcp_fatal_error(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) queue->left = hdr->hlen - queue->offset + hdgst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) goto recv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (queue->hdr_digest &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) nvmet_tcp_fatal_error(queue); /* fatal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (queue->data_digest &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) nvmet_tcp_fatal_error(queue); /* fatal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return nvmet_tcp_done_recv_pdu(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) struct nvmet_tcp_queue *queue = cmd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) queue->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) queue->left = NVME_TCP_DIGEST_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) queue->rcv_state = NVMET_TCP_RECV_DDGST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
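/*
 * Receive command data straight into the command's mapped iovec.  Once
 * the PDU's data has fully arrived, either go collect the trailing data
 * digest or, if the whole transfer is complete, execute the request and
 * expect the next PDU header.
 */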
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct nvmet_tcp_cmd *cmd = queue->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) while (msg_data_left(&cmd->recv_msg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) cmd->recv_msg.msg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) cmd->pdu_recv += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) cmd->rbytes_done += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) nvmet_tcp_unmap_pdu_iovec(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (queue->data_digest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) nvmet_tcp_prep_recv_ddgst(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (cmd->rbytes_done == cmd->req.transfer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) nvmet_tcp_execute_request(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) nvmet_prepare_receive_pdu(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
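/*
 * Receive the data digest that trails the current PDU's data and verify
 * it against the expected value; a mismatch is a fatal transport error.
 */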
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct nvmet_tcp_cmd *cmd = queue->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct kvec iov = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .iov_len = queue->left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) iov.iov_len, msg.msg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) queue->offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) queue->left -= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (queue->left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) queue->idx, cmd->req.cmd->common.command_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) le32_to_cpu(cmd->exp_ddgst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) nvmet_tcp_finish_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) nvmet_tcp_fatal_error(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) ret = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (cmd->rbytes_done == cmd->req.transfer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) nvmet_tcp_execute_request(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) nvmet_prepare_receive_pdu(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
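/*
 * Drive the receive state machine (PDU header, data, data digest) as far
 * as the socket allows without blocking.  Returns 1 when a step
 * completed, 0 when more data is needed (or the queue is in error), and
 * a negative errno on failure.
 */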
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) result = nvmet_tcp_try_recv_pdu(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (result != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) goto done_recv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) result = nvmet_tcp_try_recv_data(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (result != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) goto done_recv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) result = nvmet_tcp_try_recv_ddgst(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (result != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) goto done_recv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) done_recv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (result < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (result == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) int budget, int *recvs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) for (i = 0; i < budget; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) ret = nvmet_tcp_try_recv_one(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) nvmet_tcp_socket_error(queue, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) } else if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) (*recvs)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
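/*
 * Move the queue to DISCONNECTING exactly once, under the state lock,
 * and schedule the release work that tears the queue down.
 */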
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) spin_lock(&queue->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) queue->state = NVMET_TCP_Q_DISCONNECTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) schedule_work(&queue->release_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) spin_unlock(&queue->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
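/*
 * Per-queue I/O worker: alternate receive and send processing within
 * their respective budgets until no further progress is made or the
 * overall work budget is exhausted.
 */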
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static void nvmet_tcp_io_work(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct nvmet_tcp_queue *queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) container_of(w, struct nvmet_tcp_queue, io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) bool pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) int ret, ops = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 * We exhausted our budget, requeue ourselves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
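/*
 * Allocate the per-command PDU buffers (command, response, C2HData and
 * R2T, each with room for a header digest) from the queue's page_frag
 * cache and put the command on the queue's free list.
 */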
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct nvmet_tcp_cmd *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) u8 hdgst = nvmet_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) c->queue = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) c->req.port = queue->port->nport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (!c->cmd_pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) c->req.cmd = &c->cmd_pdu->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (!c->rsp_pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) goto out_free_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) c->req.cqe = &c->rsp_pdu->cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) c->data_pdu = page_frag_alloc(&queue->pf_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (!c->data_pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) goto out_free_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (!c->r2t_pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) goto out_free_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) list_add_tail(&c->entry, &queue->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) out_free_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) page_frag_free(c->data_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) out_free_rsp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) page_frag_free(c->rsp_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) out_free_cmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) page_frag_free(c->cmd_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) page_frag_free(c->r2t_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) page_frag_free(c->data_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) page_frag_free(c->rsp_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) page_frag_free(c->cmd_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct nvmet_tcp_cmd *cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (!cmds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) for (i = 0; i < nr_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) queue->cmds = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) while (--i >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) nvmet_tcp_free_cmd(cmds + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) kfree(cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) struct nvmet_tcp_cmd *cmds = queue->cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) for (i = 0; i < queue->nr_cmds; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) nvmet_tcp_free_cmd(cmds + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) nvmet_tcp_free_cmd(&queue->connect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) kfree(cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) struct socket *sock = queue->sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) write_lock_bh(&sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) sock->sk->sk_data_ready = queue->data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) sock->sk->sk_state_change = queue->state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) sock->sk->sk_write_space = queue->write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) sock->sk->sk_user_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) write_unlock_bh(&sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) nvmet_req_uninit(&cmd->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) nvmet_tcp_unmap_pdu_iovec(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) kfree(cmd->iov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) sgl_free(cmd->req.sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
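/*
 * On queue teardown, finish any commands that were still waiting for
 * host data, including the special connect command when no command
 * array was allocated yet.
 */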
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct nvmet_tcp_cmd *cmd = queue->cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) for (i = 0; i < queue->nr_cmds; i++, cmd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (nvmet_tcp_need_data_in(cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) nvmet_tcp_finish_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /* failed in connect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) nvmet_tcp_finish_cmd(&queue->connect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
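/*
 * Queue release work: unhook the socket callbacks, flush outstanding
 * I/O work, finish commands still waiting for data, destroy the
 * submission queue, and release the socket and all per-queue resources.
 */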
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static void nvmet_tcp_release_queue_work(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) struct nvmet_tcp_queue *queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) container_of(w, struct nvmet_tcp_queue, release_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) mutex_lock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) list_del_init(&queue->queue_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) mutex_unlock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) nvmet_tcp_restore_socket_callbacks(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) flush_work(&queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) nvmet_tcp_uninit_data_in_cmds(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) nvmet_sq_destroy(&queue->nvme_sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) cancel_work_sync(&queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) sock_release(queue->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) nvmet_tcp_free_cmds(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (queue->hdr_digest || queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) nvmet_tcp_free_crypto(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) page = virt_to_head_page(queue->pf_cache.va);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) kfree(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) static void nvmet_tcp_data_ready(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct nvmet_tcp_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) queue = sk->sk_user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (likely(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
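/*
 * Socket write-space callback: while the queue is still connecting,
 * defer to the socket's original callback; once live, clear
 * SOCK_NOSPACE and kick the I/O work when the stream becomes writeable
 * again.
 */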
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static void nvmet_tcp_write_space(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct nvmet_tcp_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) queue = sk->sk_user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (unlikely(!queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) queue->write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (sk_stream_is_writeable(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static void nvmet_tcp_state_change(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct nvmet_tcp_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) queue = sk->sk_user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (!queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) switch (sk->sk_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) case TCP_FIN_WAIT1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) case TCP_CLOSE_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) case TCP_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /* FALLTHRU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) nvmet_tcp_schedule_release_queue(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) pr_warn("queue %d unhandled state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) queue->idx, sk->sk_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
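/*
 * Final setup of an accepted socket: record local and peer addresses,
 * apply socket options (no linger, optional priority and TOS), and
 * install the nvmet-tcp callbacks under sk_callback_lock.  Fails with
 * -ENOTCONN if the socket is no longer established.
 */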
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct socket *sock = queue->sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct inet_sock *inet = inet_sk(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) ret = kernel_getsockname(sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) (struct sockaddr *)&queue->sockaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) ret = kernel_getpeername(sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) (struct sockaddr *)&queue->sockaddr_peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	 * Clean up whatever is sitting in the TCP transmit queue on socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * close. This is done to prevent stale data from being sent should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * the network connection be restored before TCP times out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) sock_no_linger(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (so_priority > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) sock_set_priority(sock->sk, so_priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /* Set socket type of service */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (inet->rcv_tos > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) ip_sock_set_tos(sock->sk, inet->rcv_tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) write_lock_bh(&sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (sock->sk->sk_state != TCP_ESTABLISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * If the socket is already closing, don't even start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * consuming it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) ret = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) sock->sk->sk_user_data = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) queue->data_ready = sock->sk->sk_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) sock->sk->sk_data_ready = nvmet_tcp_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) queue->state_change = sock->sk->sk_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) sock->sk->sk_state_change = nvmet_tcp_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) queue->write_space = sock->sk->sk_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) sock->sk->sk_write_space = nvmet_tcp_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) write_unlock_bh(&sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
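/*
 * Set up a queue for a newly accepted socket: initialize its work items,
 * state and lists, reserve a queue index, allocate the connect command,
 * initialize the nvmet submission queue, and hand the socket over to the
 * queue's I/O path.
 */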
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct socket *newsock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct nvmet_tcp_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) queue = kzalloc(sizeof(*queue), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (!queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) queue->sock = newsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) queue->port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) queue->nr_cmds = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) spin_lock_init(&queue->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) queue->state = NVMET_TCP_Q_CONNECTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) INIT_LIST_HEAD(&queue->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) init_llist_head(&queue->resp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) INIT_LIST_HEAD(&queue->resp_send_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (queue->idx < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) ret = queue->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) goto out_free_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) goto out_ida_remove;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) ret = nvmet_sq_init(&queue->nvme_sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) goto out_free_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) nvmet_prepare_receive_pdu(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) mutex_lock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) mutex_unlock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) ret = nvmet_tcp_set_queue_sock(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) goto out_destroy_sq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) out_destroy_sq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) mutex_lock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) list_del_init(&queue->queue_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) mutex_unlock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) nvmet_sq_destroy(&queue->nvme_sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) out_free_connect:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) nvmet_tcp_free_cmd(&queue->connect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) out_ida_remove:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) out_free_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) kfree(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
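/*
 * Accept work for a listening port: accept connections until the listen
 * socket would block, allocating an nvmet-tcp queue for each new socket.
 */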
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static void nvmet_tcp_accept_work(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct nvmet_tcp_port *port =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) container_of(w, struct nvmet_tcp_port, accept_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct socket *newsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) pr_warn("failed to accept err=%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) ret = nvmet_tcp_alloc_queue(port, newsock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) pr_err("failed to allocate queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) sock_release(newsock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) static void nvmet_tcp_listen_data_ready(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct nvmet_tcp_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) port = sk->sk_user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (!port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (sk->sk_state == TCP_LISTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) schedule_work(&port->accept_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
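/*
 * Add a listener for an nvmet port: resolve the configured address
 * family and traddr/trsvcid into a sockaddr, create the listening
 * socket, install the listen data_ready callback and socket options,
 * and bind it to the requested address.
 */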
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) static int nvmet_tcp_add_port(struct nvmet_port *nport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) struct nvmet_tcp_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) __kernel_sa_family_t af;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) port = kzalloc(sizeof(*port), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (!port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) switch (nport->disc_addr.adrfam) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) case NVMF_ADDR_FAMILY_IP4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) af = AF_INET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) case NVMF_ADDR_FAMILY_IP6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) af = AF_INET6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) pr_err("address family %d not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) nport->disc_addr.adrfam);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) goto err_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) nport->disc_addr.trsvcid, &port->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) pr_err("malformed ip/port passed: %s:%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) nport->disc_addr.traddr, nport->disc_addr.trsvcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) goto err_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) port->nport = nport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (port->nport->inline_data_size < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) ret = sock_create(port->addr.ss_family, SOCK_STREAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) IPPROTO_TCP, &port->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) pr_err("failed to create a socket\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) goto err_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) port->sock->sk->sk_user_data = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) port->data_ready = port->sock->sk->sk_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) sock_set_reuseaddr(port->sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) tcp_sock_set_nodelay(port->sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (so_priority > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) sock_set_priority(port->sock->sk, so_priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) sizeof(port->addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) pr_err("failed to bind port socket, err=%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) goto err_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) ret = kernel_listen(port->sock, 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) pr_err("failed to listen on port socket, err=%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) goto err_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) nport->priv = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) pr_info("enabling port %d (%pISpc)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) le16_to_cpu(nport->disc_addr.portid), &port->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) err_sock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) sock_release(port->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) err_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) kfree(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
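/*
 * Shut down the sockets of every queue still associated with this port.
 * Called from nvmet_tcp_remove_port() once the listening socket has
 * stopped accepting new connections.
 */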
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) struct nvmet_tcp_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) mutex_lock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (queue->port == port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) kernel_sock_shutdown(queue->sock, SHUT_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) mutex_unlock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
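/*
 * remove_port hook: restore the listening socket's original data_ready
 * callback, cancel any outstanding accept work and shut down the queues
 * that are still attached to this port.
 */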
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) static void nvmet_tcp_remove_port(struct nvmet_port *nport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) struct nvmet_tcp_port *port = nport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) write_lock_bh(&port->sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) port->sock->sk->sk_data_ready = port->data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) port->sock->sk->sk_user_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) write_unlock_bh(&port->sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) cancel_work_sync(&port->accept_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) * Destroy the remaining queues, which do not yet belong to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) * any controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) nvmet_tcp_destroy_port_queues(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) sock_release(port->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) kfree(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
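/*
 * delete_ctrl hook: shut down the socket of every queue owned by the
 * controller being deleted so that teardown of its queues can proceed.
 */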
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct nvmet_tcp_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) mutex_lock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (queue->nvme_sq.ctrl == ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) kernel_sock_shutdown(queue->sock, SHUT_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) mutex_unlock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
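/*
 * install_queue hook: size the per-queue command array to twice the
 * submission queue depth and allocate it. For the admin queue (qid 0),
 * flush scheduled work first so that any in-flight controller teardown
 * has completed.
 */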
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) struct nvmet_tcp_queue *queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) container_of(sq, struct nvmet_tcp_queue, nvme_sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (sq->qid == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /* Let inflight controller teardown complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) flush_scheduled_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) queue->nr_cmds = sq->size * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (nvmet_tcp_alloc_cmds(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) return NVME_SC_INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
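/*
 * disc_traddr hook: when the port is bound to a wildcard address, report
 * the address of the queue's own socket that the request arrived on;
 * otherwise report the configured traddr.
 */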
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) struct nvmet_port *nport, char *traddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) struct nvmet_tcp_port *port = nport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) struct nvmet_tcp_cmd *cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) container_of(req, struct nvmet_tcp_cmd, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) struct nvmet_tcp_queue *queue = cmd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
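/*
 * Fabrics ops for the TCP transport, registered with the nvmet core in
 * nvmet_tcp_init(). msdbd = 1: a single SGL data block descriptor is
 * supported per command.
 */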
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) .type = NVMF_TRTYPE_TCP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) .msdbd = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) .add_port = nvmet_tcp_add_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) .remove_port = nvmet_tcp_remove_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) .queue_response = nvmet_tcp_queue_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) .delete_ctrl = nvmet_tcp_delete_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) .install_queue = nvmet_tcp_install_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) .disc_traddr = nvmet_tcp_disc_port_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
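/*
 * Module init: allocate the high-priority nvmet_tcp_wq workqueue and
 * register the TCP transport ops with the nvmet core.
 */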
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) static int __init nvmet_tcp_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (!nvmet_tcp_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) ret = nvmet_register_transport(&nvmet_tcp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) destroy_workqueue(nvmet_tcp_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
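/*
 * Module exit: unregister the transport, then shut down the sockets of
 * any queues that are still alive. Scheduled work is flushed both before
 * and after the shutdown pass, and the workqueue is destroyed last.
 */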
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) static void __exit nvmet_tcp_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) struct nvmet_tcp_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) nvmet_unregister_transport(&nvmet_tcp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) flush_scheduled_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) mutex_lock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) kernel_sock_shutdown(queue->sock, SHUT_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) mutex_unlock(&nvmet_tcp_queue_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) flush_scheduled_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) destroy_workqueue(nvmet_tcp_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) module_init(nvmet_tcp_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) module_exit(nvmet_tcp_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */