Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * NVMe over Fabrics TCP host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/nvme-tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <net/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/blk-mq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <crypto/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <net/busy_poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include "nvme.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include "fabrics.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) struct nvme_tcp_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) /* Define the socket priority to use for connections were it is desirable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  * that the NIC consider performing optimized packet processing or filtering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  * A non-zero value being sufficient to indicate general consideration of any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  * possible optimization.  Making it a module param allows for alternative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  * values that may be unique for some NIC implementations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) static int so_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) module_param(so_priority, int, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #ifdef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) /* lockdep can detect a circular dependency of the form
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  * because dependencies are tracked for both nvme-tcp and user contexts. Using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  * a separate class prevents lockdep from conflating nvme-tcp socket use with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  * user-space socket API use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) static struct lock_class_key nvme_tcp_sk_key[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) static struct lock_class_key nvme_tcp_slock_key[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) static void nvme_tcp_reclassify_socket(struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 	switch (sk->sk_family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 	case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 					      &nvme_tcp_slock_key[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 					      "sk_lock-AF_INET-NVME",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 					      &nvme_tcp_sk_key[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 					      &nvme_tcp_slock_key[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 					      "sk_lock-AF_INET6-NVME",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 					      &nvme_tcp_sk_key[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) static void nvme_tcp_reclassify_socket(struct socket *sock) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) enum nvme_tcp_send_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	NVME_TCP_SEND_CMD_PDU = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	NVME_TCP_SEND_H2C_PDU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	NVME_TCP_SEND_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	NVME_TCP_SEND_DDGST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) struct nvme_tcp_request {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	struct nvme_request	req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 	void			*pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	struct nvme_tcp_queue	*queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	u32			data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	u32			pdu_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	u32			pdu_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	u16			ttag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	struct list_head	entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	struct llist_node	lentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	__le32			ddgst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	struct bio		*curr_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	struct iov_iter		iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	/* send state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	size_t			offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	size_t			data_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	enum nvme_tcp_send_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) enum nvme_tcp_queue_flags {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	NVME_TCP_Q_ALLOCATED	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	NVME_TCP_Q_LIVE		= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	NVME_TCP_Q_POLLING	= 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) enum nvme_tcp_recv_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	NVME_TCP_RECV_PDU = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	NVME_TCP_RECV_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	NVME_TCP_RECV_DDGST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) struct nvme_tcp_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) struct nvme_tcp_queue {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	struct socket		*sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	struct work_struct	io_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	int			io_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	struct mutex		queue_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	struct mutex		send_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	struct llist_head	req_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	struct list_head	send_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	bool			more_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	/* recv state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 	void			*pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	int			pdu_remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	int			pdu_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 	size_t			data_remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	size_t			ddgst_remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	unsigned int		nr_cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	/* send state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	struct nvme_tcp_request *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	int			queue_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	size_t			cmnd_capsule_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	struct nvme_tcp_ctrl	*ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	bool			rd_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	bool			hdr_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	bool			data_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	struct ahash_request	*rcv_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	struct ahash_request	*snd_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	__le32			exp_ddgst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	__le32			recv_ddgst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	struct page_frag_cache	pf_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	void (*state_change)(struct sock *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	void (*data_ready)(struct sock *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	void (*write_space)(struct sock *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) struct nvme_tcp_ctrl {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	/* read only in the hot path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	struct nvme_tcp_queue	*queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	struct blk_mq_tag_set	tag_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	/* other member variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	struct list_head	list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 	struct blk_mq_tag_set	admin_tag_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	struct sockaddr_storage addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	struct sockaddr_storage src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	struct nvme_ctrl	ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	struct work_struct	err_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	struct delayed_work	connect_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	struct nvme_tcp_request async_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	u32			io_queues[HCTX_MAX_TYPES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) static LIST_HEAD(nvme_tcp_ctrl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) static struct workqueue_struct *nvme_tcp_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) static const struct blk_mq_ops nvme_tcp_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	return queue - queue->ctrl->queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	u32 queue_idx = nvme_tcp_queue_id(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	if (queue_idx == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 		return queue->ctrl->admin_tag_set.tags[queue_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	return queue->ctrl->tag_set.tags[queue_idx - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	return req == &req->queue->ctrl->async_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	if (unlikely(nvme_tcp_async_req(req)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 		return false; /* async events don't have a request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	rq = blk_mq_rq_from_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	return rq_data_dir(rq) == WRITE && req->data_len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 		req->data_len <= nvme_tcp_inline_data_size(req->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	return req->iter.bvec->bv_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	return req->iter.bvec->bv_offset + req->iter.iov_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 			req->pdu_len - req->pdu_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	return req->iter.iov_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 			req->pdu_len - req->pdu_sent : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 		int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	return nvme_tcp_pdu_data_left(req) <= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		unsigned int dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	struct request *rq = blk_mq_rq_from_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	struct bio_vec *vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	int nsegs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	size_t offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 		vec = &rq->special_vec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 		nsegs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 		size = blk_rq_payload_bytes(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 		offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 		struct bio *bio = req->curr_bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 		nsegs = bio_segments(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 		size = bio->bi_iter.bi_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 		offset = bio->bi_iter.bi_bvec_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	req->iter.iov_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 		int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	req->data_sent += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	req->pdu_sent += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	iov_iter_advance(&req->iter, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	if (!iov_iter_count(&req->iter) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	    req->data_sent < req->data_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 		req->curr_bio = req->curr_bio->bi_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 		nvme_tcp_init_iter(req, WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	/* drain the send queue as much as we can... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 		ret = nvme_tcp_try_send(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	} while (ret > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	return !list_empty(&queue->send_list) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 		!llist_empty(&queue->req_list) || queue->more_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 		bool sync, bool last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	struct nvme_tcp_queue *queue = req->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	bool empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	empty = llist_add(&req->lentry, &queue->req_list) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 		list_empty(&queue->send_list) && !queue->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	 * if we're the first on the send_list and we can try to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	 * directly, otherwise queue io_work. Also, only do that if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	 * are on the same cpu, so we don't introduce contention.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	if (queue->io_cpu == raw_smp_processor_id() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 		queue->more_requests = !last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		nvme_tcp_send_all(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 		queue->more_requests = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 		mutex_unlock(&queue->send_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	if (last && nvme_tcp_queue_more(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	struct nvme_tcp_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	struct llist_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 		req = llist_entry(node, struct nvme_tcp_request, lentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 		list_add(&req->entry, &queue->send_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) static inline struct nvme_tcp_request *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	struct nvme_tcp_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	req = list_first_entry_or_null(&queue->send_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 			struct nvme_tcp_request, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 		nvme_tcp_process_req_list(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 		req = list_first_entry_or_null(&queue->send_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 				struct nvme_tcp_request, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 		if (unlikely(!req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	list_del(&req->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 		__le32 *dgst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	crypto_ahash_final(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 		struct page *page, off_t off, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	struct scatterlist sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	sg_init_marker(&sg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	sg_set_page(&sg, page, len, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	ahash_request_set_crypt(hash, &sg, NULL, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	crypto_ahash_update(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) static inline void nvme_tcp_hdgst(struct ahash_request *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 		void *pdu, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	struct scatterlist sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	sg_init_one(&sg, pdu, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	ahash_request_set_crypt(hash, &sg, pdu + len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	crypto_ahash_digest(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		void *pdu, size_t pdu_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	struct nvme_tcp_hdr *hdr = pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	__le32 recv_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	__le32 exp_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 			"queue %d: header digest flag is cleared\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 			nvme_tcp_queue_id(queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	recv_digest = *(__le32 *)(pdu + hdr->hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	exp_digest = *(__le32 *)(pdu + hdr->hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	if (recv_digest != exp_digest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 			"header digest error: recv %#x expected %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	struct nvme_tcp_hdr *hdr = pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	u8 digest_len = nvme_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	len = le32_to_cpu(hdr->plen) - hdr->hlen -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 			"queue %d: data digest flag is cleared\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		nvme_tcp_queue_id(queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	crypto_ahash_init(queue->rcv_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		struct request *rq, unsigned int hctx_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	page_frag_free(req->pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		struct request *rq, unsigned int hctx_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		unsigned int numa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	struct nvme_tcp_ctrl *ctrl = set->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	u8 hdgst = nvme_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	req->pdu = page_frag_alloc(&queue->pf_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	if (!req->pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	req->queue = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	nvme_req(rq)->ctrl = &ctrl->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		unsigned int hctx_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	struct nvme_tcp_ctrl *ctrl = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	hctx->driver_data = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		unsigned int hctx_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	struct nvme_tcp_ctrl *ctrl = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	hctx->driver_data = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) static enum nvme_tcp_recv_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		NVME_TCP_RECV_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 				nvme_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	queue->pdu_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	queue->data_remaining = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	queue->ddgst_remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	dev_warn(ctrl->device, "starting error recovery\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		struct nvme_completion *cqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	if (!rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 			"got bad cqe.command_id %#x on queue %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 			cqe->command_id, nvme_tcp_queue_id(queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		nvme_complete_rq(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	queue->nr_cqe++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		struct nvme_tcp_data_pdu *pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	if (!rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 			"got bad c2hdata.command_id %#x on queue %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 			pdu->command_id, nvme_tcp_queue_id(queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	if (!blk_rq_payload_bytes(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 			"queue %d tag %#x unexpected data\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 			nvme_tcp_queue_id(queue), rq->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	queue->data_remaining = le32_to_cpu(pdu->data_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 			"queue %d tag %#x SUCCESS set but not last PDU\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 			nvme_tcp_queue_id(queue), rq->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 		struct nvme_tcp_rsp_pdu *pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	struct nvme_completion *cqe = &pdu->cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	 * AEN requests are special as they don't time out and can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	 * survive any kind of queue freeze and often don't respond to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	 * aborts.  We don't even bother to allocate a struct request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	 * for them but rather special case them here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 				     cqe->command_id)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 				&cqe->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		ret = nvme_tcp_process_nvme_cqe(queue, cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		struct nvme_tcp_r2t_pdu *pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	struct nvme_tcp_data_pdu *data = req->pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	struct nvme_tcp_queue *queue = req->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	struct request *rq = blk_mq_rq_from_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	u8 hdgst = nvme_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	u8 ddgst = nvme_tcp_ddgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	req->pdu_len = le32_to_cpu(pdu->r2t_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	req->pdu_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	if (unlikely(!req->pdu_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 			"req %d r2t len is %u, probably a bug...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 			rq->tag, req->pdu_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 			rq->tag, req->pdu_len, req->data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			req->data_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 			"req %d unexpected r2t offset %u (expected %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 			rq->tag, le32_to_cpu(pdu->r2t_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 			req->data_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	memset(data, 0, sizeof(*data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	data->hdr.type = nvme_tcp_h2c_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	data->hdr.flags = NVME_TCP_F_DATA_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	if (queue->hdr_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		data->hdr.flags |= NVME_TCP_F_HDGST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	if (queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		data->hdr.flags |= NVME_TCP_F_DDGST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	data->hdr.hlen = sizeof(*data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	data->hdr.pdo = data->hdr.hlen + hdgst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	data->hdr.plen =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	data->ttag = pdu->ttag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	data->command_id = nvme_cid(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	data->data_offset = pdu->r2t_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	data->data_length = cpu_to_le32(req->pdu_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		struct nvme_tcp_r2t_pdu *pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	struct nvme_tcp_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	if (!rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			"got bad r2t.command_id %#x on queue %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 			pdu->command_id, nvme_tcp_queue_id(queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	req->state = NVME_TCP_SEND_H2C_PDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	req->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	nvme_tcp_queue_request(req, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		unsigned int *offset, size_t *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	struct nvme_tcp_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	char *pdu = queue->pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	ret = skb_copy_bits(skb, *offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		&pdu[queue->pdu_offset], rcv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	queue->pdu_remaining -= rcv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	queue->pdu_offset += rcv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	*offset += rcv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	*len -= rcv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	if (queue->pdu_remaining)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	hdr = queue->pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	if (queue->hdr_digest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	if (queue->data_digest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	switch (hdr->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	case nvme_tcp_c2h_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	case nvme_tcp_rsp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		nvme_tcp_init_recv_ctx(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	case nvme_tcp_r2t:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		nvme_tcp_init_recv_ctx(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 			"unsupported pdu type (%d)\n", hdr->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) static inline void nvme_tcp_end_request(struct request *rq, u16 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	union nvme_result res = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		nvme_complete_rq(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 			      unsigned int *offset, size_t *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	struct request *rq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		int recv_len, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		recv_len = min_t(size_t, *len, queue->data_remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		if (!recv_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		if (!iov_iter_count(&req->iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 			req->curr_bio = req->curr_bio->bi_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 			 * If we don`t have any bios it means that controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			 * sent more data than we requested, hence error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			if (!req->curr_bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 				dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 					"queue %d no space in request %#x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 					nvme_tcp_queue_id(queue), rq->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 				nvme_tcp_init_recv_ctx(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 				return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			nvme_tcp_init_iter(req, READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		/* we can read only from what is left in this bio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		recv_len = min_t(size_t, recv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 				iov_iter_count(&req->iter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		if (queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 				&req->iter, recv_len, queue->rcv_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			ret = skb_copy_datagram_iter(skb, *offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 					&req->iter, recv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 				"queue %d failed to copy request %#x data",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 				nvme_tcp_queue_id(queue), rq->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		*len -= recv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		*offset += recv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		queue->data_remaining -= recv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	if (!queue->data_remaining) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		if (queue->data_digest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 				queue->nr_cqe++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			nvme_tcp_init_recv_ctx(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		struct sk_buff *skb, unsigned int *offset, size_t *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	char *ddgst = (char *)&queue->recv_ddgst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	queue->ddgst_remaining -= recv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	*offset += recv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	*len -= recv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	if (queue->ddgst_remaining)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if (queue->recv_ddgst != queue->exp_ddgst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			"data digest error: recv %#x expected %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			le32_to_cpu(queue->recv_ddgst),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			le32_to_cpu(queue->exp_ddgst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 					pdu->command_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		queue->nr_cqe++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	nvme_tcp_init_recv_ctx(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			     unsigned int offset, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	struct nvme_tcp_queue *queue = desc->arg.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	size_t consumed = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		switch (nvme_tcp_recv_state(queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		case NVME_TCP_RECV_PDU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		case NVME_TCP_RECV_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		case NVME_TCP_RECV_DDGST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			result = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				"receive failed:  %d\n", result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			queue->rd_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	return consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) static void nvme_tcp_data_ready(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct nvme_tcp_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	queue = sk->sk_user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	if (likely(queue && queue->rd_enabled) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) static void nvme_tcp_write_space(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	struct nvme_tcp_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	queue = sk->sk_user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	if (likely(queue && sk_stream_is_writeable(sk))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) static void nvme_tcp_state_change(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	struct nvme_tcp_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	queue = sk->sk_user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	if (!queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	switch (sk->sk_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	case TCP_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	case TCP_CLOSE_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	case TCP_LAST_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	case TCP_FIN_WAIT1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	case TCP_FIN_WAIT2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		dev_info(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			"queue %d socket state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			nvme_tcp_queue_id(queue), sk->sk_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	queue->state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	queue->request = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if (nvme_tcp_async_req(req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		union nvme_result res = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		nvme_complete_async_event(&req->queue->ctrl->ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 				NVME_SC_HOST_PATH_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	struct nvme_tcp_queue *queue = req->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	int req_data_len = req->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		struct page *page = nvme_tcp_req_cur_page(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		size_t offset = nvme_tcp_req_cur_offset(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		size_t len = nvme_tcp_req_cur_length(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		bool last = nvme_tcp_pdu_last_send(req, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		int req_data_sent = req->data_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		int ret, flags = MSG_DONTWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			flags |= MSG_EOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		if (sendpage_ok(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			ret = kernel_sendpage(queue->sock, page, offset, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 					flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			ret = sock_no_sendpage(queue->sock, page, offset, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 					flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		if (queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			nvme_tcp_ddgst_update(queue->snd_hash, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 					offset, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		 * update the request iterator except for the last payload send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		 * in the request where we don't want to modify it as we may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		 * compete with the RX path completing the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		if (req_data_sent + ret < req_data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 			nvme_tcp_advance_req(req, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		/* fully successful last send in current PDU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		if (last && ret == len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			if (queue->data_digest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 				nvme_tcp_ddgst_final(queue->snd_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 					&req->ddgst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 				req->state = NVME_TCP_SEND_DDGST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 				req->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 				nvme_tcp_done_send_req(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	struct nvme_tcp_queue *queue = req->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	bool inline_data = nvme_tcp_has_inline_data(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	u8 hdgst = nvme_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	int len = sizeof(*pdu) + hdgst - req->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	int flags = MSG_DONTWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	if (inline_data || nvme_tcp_queue_more(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		flags |= MSG_EOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if (queue->hdr_digest && !req->offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			offset_in_page(pdu) + req->offset, len,  flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	if (unlikely(ret <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	len -= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if (!len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		if (inline_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			req->state = NVME_TCP_SEND_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			if (queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 				crypto_ahash_init(queue->snd_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			nvme_tcp_init_iter(req, WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			nvme_tcp_done_send_req(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	req->offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	struct nvme_tcp_queue *queue = req->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	struct nvme_tcp_data_pdu *pdu = req->pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	u8 hdgst = nvme_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	int len = sizeof(*pdu) - req->offset + hdgst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	if (queue->hdr_digest && !req->offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			offset_in_page(pdu) + req->offset, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	if (unlikely(ret <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	len -= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	if (!len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		req->state = NVME_TCP_SEND_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		if (queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			crypto_ahash_init(queue->snd_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		if (!req->data_sent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			nvme_tcp_init_iter(req, WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	req->offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	struct nvme_tcp_queue *queue = req->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	size_t offset = req->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	struct kvec iov = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		.iov_base = (u8 *)&req->ddgst + req->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	if (nvme_tcp_queue_more(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		msg.msg_flags |= MSG_MORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		msg.msg_flags |= MSG_EOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (unlikely(ret <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		nvme_tcp_done_send_req(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	req->offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	struct nvme_tcp_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	int ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	if (!queue->request) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		queue->request = nvme_tcp_fetch_request(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		if (!queue->request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	req = queue->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	if (req->state == NVME_TCP_SEND_CMD_PDU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		ret = nvme_tcp_try_send_cmd_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		if (!nvme_tcp_has_inline_data(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	if (req->state == NVME_TCP_SEND_H2C_PDU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		ret = nvme_tcp_try_send_data_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	if (req->state == NVME_TCP_SEND_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		ret = nvme_tcp_try_send_data(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	if (req->state == NVME_TCP_SEND_DDGST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		ret = nvme_tcp_try_send_ddgst(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	} else if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			"failed to send request %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		if (ret != -EPIPE && ret != -ECONNRESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 			nvme_tcp_fail_request(queue->request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		nvme_tcp_done_send_req(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	struct socket *sock = queue->sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	read_descriptor_t rd_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	int consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	rd_desc.arg.data = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	rd_desc.count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	queue->nr_cqe = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	return consumed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static void nvme_tcp_io_work(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	struct nvme_tcp_queue *queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		container_of(w, struct nvme_tcp_queue, io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	unsigned long deadline = jiffies + msecs_to_jiffies(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		bool pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		if (mutex_trylock(&queue->send_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			result = nvme_tcp_try_send(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			mutex_unlock(&queue->send_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			if (result > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 				pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			else if (unlikely(result < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		result = nvme_tcp_try_recv(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		if (result > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		else if (unlikely(result < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		if (!pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	} while (!time_after(jiffies, deadline)); /* quota is exhausted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	ahash_request_free(queue->rcv_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	ahash_request_free(queue->snd_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	crypto_free_ahash(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	struct crypto_ahash *tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (IS_ERR(tfm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		return PTR_ERR(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	if (!queue->snd_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		goto free_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	if (!queue->rcv_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		goto free_snd_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) free_snd_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	ahash_request_free(queue->snd_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) free_tfm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	crypto_free_ahash(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	struct nvme_tcp_request *async = &ctrl->async_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	page_frag_free(async->pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	struct nvme_tcp_request *async = &ctrl->async_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	u8 hdgst = nvme_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	async->pdu = page_frag_alloc(&queue->pf_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	if (!async->pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	async->queue = &ctrl->queues[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	if (queue->hdr_digest || queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		nvme_tcp_free_crypto(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	sock_release(queue->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	kfree(queue->pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	mutex_destroy(&queue->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	struct nvme_tcp_icreq_pdu *icreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	struct nvme_tcp_icresp_pdu *icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	struct msghdr msg = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	struct kvec iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	bool ctrl_hdgst, ctrl_ddgst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	if (!icreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (!icresp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		goto free_icreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	icreq->hdr.type = nvme_tcp_icreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	icreq->hdr.hlen = sizeof(*icreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	icreq->hdr.pdo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	icreq->maxr2t = 0; /* single inflight r2t supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	icreq->hpda = 0; /* no alignment constraint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	if (queue->hdr_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	if (queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	iov.iov_base = icreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	iov.iov_len = sizeof(*icreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		goto free_icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	memset(&msg, 0, sizeof(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	iov.iov_base = icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	iov.iov_len = sizeof(*icresp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			iov.iov_len, msg.msg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		goto free_icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	if (icresp->hdr.type != nvme_tcp_icresp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		pr_err("queue %d: bad type returned %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			nvme_tcp_queue_id(queue), icresp->hdr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		goto free_icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		pr_err("queue %d: bad pdu length returned %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			nvme_tcp_queue_id(queue), icresp->hdr.plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		goto free_icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	if (icresp->pfv != NVME_TCP_PFV_1_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		pr_err("queue %d: bad pfv returned %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			nvme_tcp_queue_id(queue), icresp->pfv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		goto free_icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	if ((queue->data_digest && !ctrl_ddgst) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	    (!queue->data_digest && ctrl_ddgst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			nvme_tcp_queue_id(queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			queue->data_digest ? "enabled" : "disabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 			ctrl_ddgst ? "enabled" : "disabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		goto free_icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if ((queue->hdr_digest && !ctrl_hdgst) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	    (!queue->hdr_digest && ctrl_hdgst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			nvme_tcp_queue_id(queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			queue->hdr_digest ? "enabled" : "disabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			ctrl_hdgst ? "enabled" : "disabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		goto free_icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	if (icresp->cpda != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		pr_err("queue %d: unsupported cpda returned %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			nvme_tcp_queue_id(queue), icresp->cpda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		goto free_icresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) free_icresp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	kfree(icresp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) free_icreq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	kfree(icreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	return nvme_tcp_queue_id(queue) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	int qid = nvme_tcp_queue_id(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	return !nvme_tcp_admin_queue(queue) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	int qid = nvme_tcp_queue_id(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	return !nvme_tcp_admin_queue(queue) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		!nvme_tcp_default_queue(queue) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			  ctrl->io_queues[HCTX_TYPE_READ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	int qid = nvme_tcp_queue_id(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	return !nvme_tcp_admin_queue(queue) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		!nvme_tcp_default_queue(queue) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		!nvme_tcp_read_queue(queue) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 			  ctrl->io_queues[HCTX_TYPE_READ] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			  ctrl->io_queues[HCTX_TYPE_POLL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	int qid = nvme_tcp_queue_id(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	int n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	if (nvme_tcp_default_queue(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		n = qid - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	else if (nvme_tcp_read_queue(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	else if (nvme_tcp_poll_queue(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 				ctrl->io_queues[HCTX_TYPE_READ] - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		int qid, size_t queue_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	int ret, rcv_pdu_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	mutex_init(&queue->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	queue->ctrl = ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	init_llist_head(&queue->req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	INIT_LIST_HEAD(&queue->send_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	mutex_init(&queue->send_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	queue->queue_size = queue_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	if (qid > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 						NVME_TCP_ADMIN_CCSZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 			IPPROTO_TCP, &queue->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		dev_err(nctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			"failed to create socket: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		goto err_destroy_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	nvme_tcp_reclassify_socket(queue->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	/* Single syn retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	tcp_sock_set_syncnt(queue->sock->sk, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	/* Set TCP no delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	tcp_sock_set_nodelay(queue->sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	 * Cleanup whatever is sitting in the TCP transmit queue on socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	 * close. This is done to prevent stale data from being sent should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	 * the network connection be restored before TCP times out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	sock_no_linger(queue->sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	if (so_priority > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		sock_set_priority(queue->sock->sk, so_priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	/* Set socket type of service */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	if (nctrl->opts->tos >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	/* Set 10 seconds timeout for icresp recvmsg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	queue->sock->sk->sk_rcvtimeo = 10 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	nvme_tcp_set_queue_io_cpu(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	queue->request = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	queue->data_remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	queue->ddgst_remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	queue->pdu_remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	queue->pdu_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	sk_set_memalloc(queue->sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			sizeof(ctrl->src_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			dev_err(nctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 				"failed to bind queue %d socket %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 				qid, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			goto err_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	queue->hdr_digest = nctrl->opts->hdr_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	queue->data_digest = nctrl->opts->data_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (queue->hdr_digest || queue->data_digest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		ret = nvme_tcp_alloc_crypto(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			dev_err(nctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 				"failed to allocate queue %d crypto\n", qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			goto err_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 			nvme_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	if (!queue->pdu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		goto err_crypto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	dev_dbg(nctrl->device, "connecting queue %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			nvme_tcp_queue_id(queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		sizeof(ctrl->addr), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		dev_err(nctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			"failed to connect socket: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		goto err_rcv_pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	ret = nvme_tcp_init_connection(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		goto err_init_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	queue->rd_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	nvme_tcp_init_recv_ctx(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	queue->sock->sk->sk_user_data = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	queue->state_change = queue->sock->sk->sk_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	queue->data_ready = queue->sock->sk->sk_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	queue->write_space = queue->sock->sk->sk_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) #ifdef CONFIG_NET_RX_BUSY_POLL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	queue->sock->sk->sk_ll_usec = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) err_init_connect:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) err_rcv_pdu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	kfree(queue->pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) err_crypto:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	if (queue->hdr_digest || queue->data_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		nvme_tcp_free_crypto(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) err_sock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	sock_release(queue->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	queue->sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) err_destroy_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	mutex_destroy(&queue->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	struct socket *sock = queue->sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	write_lock_bh(&sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	sock->sk->sk_user_data  = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	sock->sk->sk_data_ready = queue->data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	sock->sk->sk_state_change = queue->state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	sock->sk->sk_write_space  = queue->write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	write_unlock_bh(&sock->sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	nvme_tcp_restore_sock_calls(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	cancel_work_sync(&queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	mutex_lock(&queue->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		__nvme_tcp_stop_queue(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	mutex_unlock(&queue->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	if (idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		ret = nvmf_connect_io_queue(nctrl, idx, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		ret = nvmf_connect_admin_queue(nctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		dev_err(nctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			"failed to connect queue: %d ret=%d\n", idx, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		bool admin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	struct blk_mq_tag_set *set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	if (admin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		set = &ctrl->admin_tag_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		memset(set, 0, sizeof(*set));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		set->ops = &nvme_tcp_admin_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		set->reserved_tags = 2; /* connect + keep-alive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		set->numa_node = nctrl->numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		set->flags = BLK_MQ_F_BLOCKING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		set->cmd_size = sizeof(struct nvme_tcp_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		set->driver_data = ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		set->nr_hw_queues = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		set->timeout = ADMIN_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		set = &ctrl->tag_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		memset(set, 0, sizeof(*set));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		set->ops = &nvme_tcp_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		set->queue_depth = nctrl->sqsize + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		set->reserved_tags = 1; /* fabric connect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		set->numa_node = nctrl->numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		set->cmd_size = sizeof(struct nvme_tcp_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		set->driver_data = ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		set->nr_hw_queues = nctrl->queue_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		set->timeout = NVME_IO_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	ret = blk_mq_alloc_tag_set(set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	return set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		cancel_work_sync(&ctrl->async_event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	nvme_tcp_free_queue(ctrl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	for (i = 1; i < ctrl->queue_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		nvme_tcp_free_queue(ctrl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	for (i = 1; i < ctrl->queue_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		nvme_tcp_stop_queue(ctrl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	for (i = 1; i < ctrl->queue_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		ret = nvme_tcp_start_queue(ctrl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 			goto out_stop_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) out_stop_queues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	for (i--; i >= 1; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		nvme_tcp_stop_queue(ctrl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		goto out_free_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) out_free_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	nvme_tcp_free_queue(ctrl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	for (i = 1; i < ctrl->queue_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		ret = nvme_tcp_alloc_queue(ctrl, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 				ctrl->sqsize + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 			goto out_free_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) out_free_queues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	for (i--; i >= 1; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		nvme_tcp_free_queue(ctrl, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	unsigned int nr_io_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	return nr_io_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		unsigned int nr_io_queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	struct nvmf_ctrl_options *opts = nctrl->opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		 * separate read/write queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		 * hand out dedicated default queues only after we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		 * sufficient read queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			min(opts->nr_write_queues, nr_io_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		 * shared read/write queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		 * either no write queues were requested, or we don't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		 * sufficient queue count to have dedicated default queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			min(opts->nr_io_queues, nr_io_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	if (opts->nr_poll_queues && nr_io_queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		/* map dedicated poll queues only if we have queues left */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		ctrl->io_queues[HCTX_TYPE_POLL] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 			min(opts->nr_poll_queues, nr_io_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	unsigned int nr_io_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	if (nr_io_queues == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		dev_err(ctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 			"unable to set any I/O queues\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	ctrl->queue_count = nr_io_queues + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	dev_info(ctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		"creating %d I/O queues.\n", nr_io_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	nvme_tcp_set_io_queues(ctrl, nr_io_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	return __nvme_tcp_alloc_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	nvme_tcp_stop_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	if (remove) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		blk_cleanup_queue(ctrl->connect_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		blk_mq_free_tag_set(ctrl->tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	nvme_tcp_free_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	ret = nvme_tcp_alloc_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	if (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		if (IS_ERR(ctrl->tagset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 			ret = PTR_ERR(ctrl->tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			goto out_free_io_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		if (IS_ERR(ctrl->connect_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			ret = PTR_ERR(ctrl->connect_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			goto out_free_tag_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	ret = nvme_tcp_start_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		goto out_cleanup_connect_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	if (!new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		nvme_start_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 			 * If we timed out waiting for freeze we are likely to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 			 * be stuck.  Fail the controller initialization just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			 * to be safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			goto out_wait_freeze_timed_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		blk_mq_update_nr_hw_queues(ctrl->tagset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 			ctrl->queue_count - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		nvme_unfreeze(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) out_wait_freeze_timed_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	nvme_stop_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	nvme_sync_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	nvme_tcp_stop_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) out_cleanup_connect_q:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	nvme_cancel_tagset(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		blk_cleanup_queue(ctrl->connect_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) out_free_tag_set:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		blk_mq_free_tag_set(ctrl->tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) out_free_io_queues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	nvme_tcp_free_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	nvme_tcp_stop_queue(ctrl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	if (remove) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		blk_cleanup_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		blk_cleanup_queue(ctrl->fabrics_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		blk_mq_free_tag_set(ctrl->admin_tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	nvme_tcp_free_admin_queue(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	error = nvme_tcp_alloc_admin_queue(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	if (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		if (IS_ERR(ctrl->admin_tagset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			error = PTR_ERR(ctrl->admin_tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 			goto out_free_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		if (IS_ERR(ctrl->fabrics_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			error = PTR_ERR(ctrl->fabrics_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 			goto out_free_tagset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		if (IS_ERR(ctrl->admin_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			error = PTR_ERR(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			goto out_cleanup_fabrics_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	error = nvme_tcp_start_queue(ctrl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		goto out_cleanup_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	error = nvme_enable_ctrl(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		goto out_stop_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	blk_mq_unquiesce_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	error = nvme_init_identify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		goto out_quiesce_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) out_quiesce_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	blk_mq_quiesce_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	blk_sync_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) out_stop_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	nvme_tcp_stop_queue(ctrl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	nvme_cancel_admin_tagset(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) out_cleanup_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		blk_cleanup_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) out_cleanup_fabrics_q:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		blk_cleanup_queue(ctrl->fabrics_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) out_free_tagset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		blk_mq_free_tag_set(ctrl->admin_tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) out_free_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	nvme_tcp_free_admin_queue(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		bool remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	blk_mq_quiesce_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	blk_sync_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	nvme_tcp_stop_queue(ctrl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	if (ctrl->admin_tagset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 			nvme_cancel_request, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	if (remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		blk_mq_unquiesce_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	nvme_tcp_destroy_admin_queue(ctrl, remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		bool remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	if (ctrl->queue_count <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	blk_mq_quiesce_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	nvme_start_freeze(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	nvme_stop_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	nvme_sync_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	nvme_tcp_stop_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	if (ctrl->tagset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		blk_mq_tagset_busy_iter(ctrl->tagset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			nvme_cancel_request, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		blk_mq_tagset_wait_completed_request(ctrl->tagset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	if (remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		nvme_start_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	nvme_tcp_destroy_io_queues(ctrl, remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	/* If we are resetting/deleting then do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	if (ctrl->state != NVME_CTRL_CONNECTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 			ctrl->state == NVME_CTRL_LIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	if (nvmf_should_reconnect(ctrl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 			ctrl->opts->reconnect_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 				ctrl->opts->reconnect_delay * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		dev_info(ctrl->device, "Removing controller...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		nvme_delete_ctrl(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	struct nvmf_ctrl_options *opts = ctrl->opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	ret = nvme_tcp_configure_admin_queue(ctrl, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	if (ctrl->icdoff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		dev_err(ctrl->device, "icdoff is not supported!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		goto destroy_admin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	if (opts->queue_size > ctrl->sqsize + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		dev_warn(ctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 			"queue_size %zu > ctrl sqsize %u, clamping down\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 			opts->queue_size, ctrl->sqsize + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		dev_warn(ctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 			"sqsize %u > ctrl maxcmd %u, clamping down\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			ctrl->sqsize + 1, ctrl->maxcmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		ctrl->sqsize = ctrl->maxcmd - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	if (ctrl->queue_count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		ret = nvme_tcp_configure_io_queues(ctrl, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			goto destroy_admin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		 * state change failure is ok if we started ctrl delete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		 * unless we're during creation of a new controller to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		 * avoid races with teardown flow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		WARN_ON_ONCE(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		goto destroy_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	nvme_start_ctrl(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) destroy_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	if (ctrl->queue_count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		nvme_stop_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		nvme_sync_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		nvme_tcp_stop_io_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		nvme_cancel_tagset(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		nvme_tcp_destroy_io_queues(ctrl, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) destroy_admin:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	blk_mq_quiesce_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	blk_sync_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	nvme_tcp_stop_queue(ctrl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	nvme_cancel_admin_tagset(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	nvme_tcp_destroy_admin_queue(ctrl, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 			struct nvme_tcp_ctrl, connect_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	++ctrl->nr_reconnects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	if (nvme_tcp_setup_ctrl(ctrl, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		goto requeue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 			ctrl->nr_reconnects);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	ctrl->nr_reconnects = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) requeue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 			ctrl->nr_reconnects);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	nvme_tcp_reconnect_or_remove(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) static void nvme_tcp_error_recovery_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 				struct nvme_tcp_ctrl, err_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	nvme_stop_keep_alive(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	flush_work(&ctrl->async_event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	nvme_tcp_teardown_io_queues(ctrl, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	/* unquiesce to fail fast pending requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	nvme_start_queues(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	nvme_tcp_teardown_admin_queue(ctrl, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	blk_mq_unquiesce_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		/* state change failure is ok if we started ctrl delete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	nvme_tcp_reconnect_or_remove(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	blk_mq_quiesce_queue(ctrl->admin_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	if (shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		nvme_shutdown_ctrl(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		nvme_disable_ctrl(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	nvme_tcp_teardown_ctrl(ctrl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) static void nvme_reset_ctrl_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	struct nvme_ctrl *ctrl =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		container_of(work, struct nvme_ctrl, reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	nvme_stop_ctrl(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	nvme_tcp_teardown_ctrl(ctrl, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		/* state change failure is ok if we started ctrl delete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	if (nvme_tcp_setup_ctrl(ctrl, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) out_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	++ctrl->nr_reconnects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	nvme_tcp_reconnect_or_remove(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	if (list_empty(&ctrl->list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		goto free_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	mutex_lock(&nvme_tcp_ctrl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	list_del(&ctrl->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	mutex_unlock(&nvme_tcp_ctrl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	nvmf_free_options(nctrl->opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) free_ctrl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	kfree(ctrl->queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	kfree(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) static void nvme_tcp_set_sg_null(struct nvme_command *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	sg->addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	sg->length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 			NVME_SGL_FMT_TRANSPORT_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		struct nvme_command *c, u32 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	sg->length = cpu_to_le32(data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		u32 data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	sg->addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	sg->length = cpu_to_le32(data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 			NVME_SGL_FMT_TRANSPORT_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	struct nvme_command *cmd = &pdu->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	u8 hdgst = nvme_tcp_hdgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	memset(pdu, 0, sizeof(*pdu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	pdu->hdr.type = nvme_tcp_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	if (queue->hdr_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	pdu->hdr.hlen = sizeof(*pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	cmd->common.opcode = nvme_admin_async_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	cmd->common.flags |= NVME_CMD_SGL_METABUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	nvme_tcp_set_sg_null(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	ctrl->async_req.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	ctrl->async_req.curr_bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	ctrl->async_req.data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	nvme_tcp_queue_request(&ctrl->async_req, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) static void nvme_tcp_complete_timed_out(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		blk_mq_complete_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) static enum blk_eh_timer_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) nvme_tcp_timeout(struct request *rq, bool reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	dev_warn(ctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		"queue %d: timeout request %#x type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	if (ctrl->state != NVME_CTRL_LIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		 * If we are resetting, connecting or deleting we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		 * complete immediately because we may block controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		 * teardown or setup sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		 * - ctrl disable/shutdown fabrics requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 		 * - connect requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		 * - initialization admin requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		 * - I/O requests that entered after unquiescing and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		 *   the controller stopped responding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		 * All other requests should be cancelled by the error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		 * recovery work, so it's fine that we fail it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		nvme_tcp_complete_timed_out(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		return BLK_EH_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	 * LIVE state should trigger the normal error recovery which will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	 * handle completing this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	nvme_tcp_error_recovery(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	return BLK_EH_RESET_TIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 			struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	struct nvme_command *c = &pdu->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	c->common.flags |= NVME_CMD_SGL_METABUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	if (!blk_rq_nr_phys_segments(rq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		nvme_tcp_set_sg_null(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	else if (rq_data_dir(rq) == WRITE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	    req->data_len <= nvme_tcp_inline_data_size(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		nvme_tcp_set_sg_host_data(c, req->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	struct nvme_tcp_queue *queue = req->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	blk_status_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	req->state = NVME_TCP_SEND_CMD_PDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	req->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	req->data_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	req->pdu_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	req->pdu_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	req->data_len = blk_rq_nr_phys_segments(rq) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 				blk_rq_payload_bytes(rq) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	req->curr_bio = rq->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	if (rq_data_dir(rq) == WRITE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	    req->data_len <= nvme_tcp_inline_data_size(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		req->pdu_len = req->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	else if (req->curr_bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		nvme_tcp_init_iter(req, READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	pdu->hdr.type = nvme_tcp_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	pdu->hdr.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	if (queue->hdr_digest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	if (queue->data_digest && req->pdu_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		pdu->hdr.flags |= NVME_TCP_F_DDGST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		ddgst = nvme_tcp_ddgst_len(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	pdu->hdr.hlen = sizeof(*pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	pdu->hdr.plen =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	ret = nvme_tcp_map_data(queue, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		nvme_cleanup_cmd(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		dev_err(queue->ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 			"Failed to map data (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	struct nvme_tcp_queue *queue = hctx->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	if (!llist_empty(&queue->req_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 		const struct blk_mq_queue_data *bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	struct nvme_ns *ns = hctx->queue->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	struct nvme_tcp_queue *queue = hctx->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	struct request *rq = bd->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	blk_status_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	blk_mq_start_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	nvme_tcp_queue_request(req, true, bd->last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	struct nvme_tcp_ctrl *ctrl = set->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 		/* separate read/write queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 		set->map[HCTX_TYPE_DEFAULT].nr_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		set->map[HCTX_TYPE_READ].nr_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 			ctrl->io_queues[HCTX_TYPE_READ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		set->map[HCTX_TYPE_READ].queue_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		/* shared read/write queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		set->map[HCTX_TYPE_DEFAULT].nr_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		set->map[HCTX_TYPE_READ].nr_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		set->map[HCTX_TYPE_READ].queue_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		/* map dedicated poll queues only if we have queues left */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		set->map[HCTX_TYPE_POLL].nr_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 				ctrl->io_queues[HCTX_TYPE_POLL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		set->map[HCTX_TYPE_POLL].queue_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 			ctrl->io_queues[HCTX_TYPE_READ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	dev_info(ctrl->ctrl.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		"mapped %d/%d/%d default/read/poll queues.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		ctrl->io_queues[HCTX_TYPE_DEFAULT],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		ctrl->io_queues[HCTX_TYPE_READ],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		ctrl->io_queues[HCTX_TYPE_POLL]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	struct nvme_tcp_queue *queue = hctx->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	struct sock *sk = queue->sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 		sk_busy_loop(sk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	nvme_tcp_try_recv(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	return queue->nr_cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) static const struct blk_mq_ops nvme_tcp_mq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	.queue_rq	= nvme_tcp_queue_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	.commit_rqs	= nvme_tcp_commit_rqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	.complete	= nvme_complete_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	.init_request	= nvme_tcp_init_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	.exit_request	= nvme_tcp_exit_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	.init_hctx	= nvme_tcp_init_hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	.timeout	= nvme_tcp_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	.map_queues	= nvme_tcp_map_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	.poll		= nvme_tcp_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	.queue_rq	= nvme_tcp_queue_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	.complete	= nvme_complete_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	.init_request	= nvme_tcp_init_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	.exit_request	= nvme_tcp_exit_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	.init_hctx	= nvme_tcp_init_admin_hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	.timeout	= nvme_tcp_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	.name			= "tcp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	.module			= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	.flags			= NVME_F_FABRICS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	.reg_read32		= nvmf_reg_read32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	.reg_read64		= nvmf_reg_read64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	.reg_write32		= nvmf_reg_write32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	.free_ctrl		= nvme_tcp_free_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	.submit_async_event	= nvme_tcp_submit_async_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	.delete_ctrl		= nvme_tcp_delete_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	.get_address		= nvmf_get_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	struct nvme_tcp_ctrl *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	mutex_lock(&nvme_tcp_ctrl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 		if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	mutex_unlock(&nvme_tcp_ctrl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 		struct nvmf_ctrl_options *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	struct nvme_tcp_ctrl *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	if (!ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	INIT_LIST_HEAD(&ctrl->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	ctrl->ctrl.opts = opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 				opts->nr_poll_queues + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	ctrl->ctrl.sqsize = opts->queue_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	ctrl->ctrl.kato = opts->kato;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	INIT_DELAYED_WORK(&ctrl->connect_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 			nvme_tcp_reconnect_ctrl_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		opts->trsvcid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		if (!opts->trsvcid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 			goto out_free_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		opts->mask |= NVMF_OPT_TRSVCID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 			opts->traddr, opts->trsvcid, &ctrl->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		pr_err("malformed address passed: %s:%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 			opts->traddr, opts->trsvcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		goto out_free_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 			opts->host_traddr, NULL, &ctrl->src_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 			pr_err("malformed src address passed: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 			       opts->host_traddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 			goto out_free_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 		ret = -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 		goto out_free_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	if (!ctrl->queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 		goto out_free_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		goto out_kfree_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 		ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 		goto out_uninit_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 		goto out_uninit_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	mutex_lock(&nvme_tcp_ctrl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	mutex_unlock(&nvme_tcp_ctrl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	return &ctrl->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) out_uninit_ctrl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	nvme_uninit_ctrl(&ctrl->ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	nvme_put_ctrl(&ctrl->ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) out_kfree_queues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	kfree(ctrl->queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) out_free_ctrl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	kfree(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) static struct nvmf_transport_ops nvme_tcp_transport = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	.name		= "tcp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	.module		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	.required_opts	= NVMF_OPT_TRADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 			  NVMF_OPT_TOS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	.create_ctrl	= nvme_tcp_create_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) static int __init nvme_tcp_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	if (!nvme_tcp_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	nvmf_register_transport(&nvme_tcp_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) static void __exit nvme_tcp_cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	struct nvme_tcp_ctrl *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	nvmf_unregister_transport(&nvme_tcp_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	mutex_lock(&nvme_tcp_ctrl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 		nvme_delete_ctrl(&ctrl->ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	mutex_unlock(&nvme_tcp_ctrl_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	flush_workqueue(nvme_delete_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	destroy_workqueue(nvme_tcp_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) module_init(nvme_tcp_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) module_exit(nvme_tcp_cleanup_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) MODULE_LICENSE("GPL v2");