Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "../host/fc.h"


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		256

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {		/* for an LS RQST RCV */
	struct nvmefc_ls_rsp		*lsrsp;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_rcv_list; /* tgtport->ls_rcv_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;
	void				*hosthandle;

	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_ls_req_op {		/* for an LS RQST XMT */
	struct nvmefc_ls_req		ls_req;

	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;

	int				ls_error;
	struct list_head		lsreq_list; /* tgtport->ls_req_list */
	bool				req_queued;
};


/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_rcv_list;
	struct list_head		ls_req_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct list_head		host_list;
	struct ida			assoc_cnt;
	struct nvmet_fc_port_entry	*pe;
	struct kref			ref;
	u32				max_sg_cnt;
};

struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_port		*port;
	u64				node_name;
	u64				port_name;
	struct list_head		pe_list;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_hostport {
	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;
	struct list_head		host_list;
	struct kref			ref;
	u8				invalid;
};

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	atomic_t			terminating;
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_hostport	*hostport;
	struct nvmet_fc_ls_iod		*rcv_disconn;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}


/*
 * Association and Connection IDs:
 *
 * An Association ID has a random number in its upper 6 bytes and zero
 *   in its lower 2 bytes.
 *
 * A Connection ID is the Association ID with the QID OR'd into the
 *   lower 2 bytes.
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
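
/*
 * Illustrative example (values invented for this sketch): with an
 * association_id of 0x123456789abc0000 and qid 3,
 * nvmet_fc_makeconnid() yields 0x123456789abc0003, from which
 * nvmet_fc_getassociationid() recovers 0x123456789abc0000 and
 * nvmet_fc_getqueueid() recovers 3. Per the note above, the qid 0
 * connection id equals the association id.
 */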

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
				 fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
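
/*
 * For illustration: with dev == NULL (the fcloop case), the wrappers
 * below make fc_dma_map_single() return a dma address of 0 and
 * fc_dma_mapping_error(NULL, 0) return 0, so callers observe a
 * "successful" mapping even though no real DMA setup occurred.
 */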

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}


/* ********************** FC-NVME LS XMT Handling ************************* */


static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
{
	struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&tgtport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvmet_fc_tgtport_put(tgtport);
}

static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (!tgtport->ops->ls_req)
		return -EOPNOTSUPP;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);

	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_puttgtport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&tgtport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
				   lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&tgtport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_puttgtport:
	nvmet_fc_tgtport_put(tgtport);

	return ret;
}

static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvmet_fc_send_ls_req(tgtport, lsop, done);
}

static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmet_fc_ls_req_op *lsop =
		container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);

	__nvmet_fc_finish_ls_req(lsop);

	/* fc-nvme target doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. I.e. everything is torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme target is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme host, so the target may never get a
 * response even if it tried.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme host
 * is present and receives the LS, it too can tear down.
 */
static void
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmet_fc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	/*
	 * If ls_req is NULL or there is no hosthandle, it's an older
	 * lldd and sending no message is normal. Otherwise, send unless
	 * the hostport has already been invalidated by the lldd.
	 */
	if (!tgtport->ops->ls_req || !assoc->hostport ||
	    assoc->hostport->invalid)
		return;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(tgtport->dev,
			"{%d:%d} send Disconnect Association failed: ENOMEM\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		return;
	}

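	/*
	 * Carve the single allocation above into consecutive regions:
	 * the lsop itself, the Disconnect Association request and
	 * accept buffers, then lsrqst_priv_sz bytes of LLDD-private
	 * space (layout follows from the pointer arithmetic below):
	 *
	 *   [ lsop | discon_rqst | discon_acc | LLDD private ]
	 */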
	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (tgtport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	lsop->tgtport = tgtport;
	lsop->hosthandle = assoc->hostport->hosthandle;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				assoc->association_id);

	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
				nvmet_fc_disconnect_assoc_done);
	if (ret) {
		dev_info(tgtport->dev,
			"{%d:%d} XMT Disconnect Association failed: %d\n",
			tgtport->fc_target_port.port_num, assoc->a_id, ret);
		kfree(lsop);
	}
}


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);

		iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
				       sizeof(union nvmefc_ls_responses),
				       GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						sizeof(*iod->rspbuf),
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_rcv_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}

	/* iod now points one element before the array; free the array itself */
	kfree(tgtport->iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, sizeof(*iod->rspbuf),
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
					struct nvmet_fc_ls_iod, ls_rcv_list);
	if (iod)
		list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}


static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
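	/*
	 * For illustration (max_hw_queues == 4 assumed): io queues
	 * qid 1..8 map to hwqid 0,1,2,3,0,1,2,3, while the admin
	 * queue (qid 0) always maps to hwqid 0.
	 */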
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}

static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen  = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Keep the queue lookup reference that was taken when the
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) static struct nvmet_fc_tgt_queue *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			u16 qid, u16 sqsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	struct nvmet_fc_tgt_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (qid > NVMET_NR_QUEUES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	if (!queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	if (!nvmet_fc_tgt_a_get(assoc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		goto out_free_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 				assoc->tgtport->fc_target_port.port_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 				assoc->a_id, qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	if (!queue->work_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		goto out_a_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	queue->qid = qid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	queue->sqsize = sqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	queue->assoc = assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	INIT_LIST_HEAD(&queue->fod_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	INIT_LIST_HEAD(&queue->avail_defer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	INIT_LIST_HEAD(&queue->pending_cmd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	atomic_set(&queue->connected, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	atomic_set(&queue->sqtail, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	atomic_set(&queue->rsn, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	atomic_set(&queue->zrspcnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	spin_lock_init(&queue->qlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	kref_init(&queue->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	ret = nvmet_sq_init(&queue->nvme_sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		goto out_fail_iodlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	WARN_ON(assoc->queues[qid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	spin_lock_irqsave(&assoc->tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	assoc->queues[qid] = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	return queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) out_fail_iodlist:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	destroy_workqueue(queue->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) out_a_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	nvmet_fc_tgt_a_put(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) out_free_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	kfree(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) nvmet_fc_tgt_queue_free(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	struct nvmet_fc_tgt_queue *queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		container_of(ref, struct nvmet_fc_tgt_queue, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	queue->assoc->queues[queue->qid] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	nvmet_fc_tgt_a_put(queue->assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	destroy_workqueue(queue->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	kfree(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	return kref_get_unless_zero(&queue->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct nvmet_fc_fcp_iod *fod = queue->fod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	bool disconnect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	disconnect = atomic_xchg(&queue->connected, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	/* if not connected, nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	if (!disconnect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	spin_lock_irqsave(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	/* abort outstanding I/Os */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	for (i = 0; i < queue->sqsize; fod++, i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		if (fod->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			spin_lock(&fod->flock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			fod->abort = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			 * only call lldd abort routine if waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			 * writedata. other outstanding ops should finish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			 * on their own.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			if (fod->writedataactive) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 				fod->aborted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 				spin_unlock(&fod->flock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 				tgtport->ops->fcp_abort(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 					&tgtport->fc_target_port, fod->fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 				spin_unlock(&fod->flock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	/* Clean up deferred I/Os in the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 				req_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		list_del(&deferfcp->req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		kfree(deferfcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
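	/*
	 * Drain commands that were received but deferred because no free
	 * fod was available: notify the LLDD each one is now being
	 * processed, then abort and release it. qlock is dropped around
	 * the LLDD callouts and re-taken before the next list pick.
	 */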
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 				struct nvmet_fc_defer_fcp_req, req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		if (!deferfcp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		list_del(&deferfcp->req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		spin_unlock_irqrestore(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 				deferfcp->fcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 				deferfcp->fcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 				deferfcp->fcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		/* release the queue lookup reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		nvmet_fc_tgt_q_put(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		kfree(deferfcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		spin_lock_irqsave(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	spin_unlock_irqrestore(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	flush_workqueue(queue->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	nvmet_sq_destroy(&queue->nvme_sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	nvmet_fc_tgt_q_put(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) static struct nvmet_fc_tgt_queue *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 				u64 connection_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	struct nvmet_fc_tgt_assoc *assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	struct nvmet_fc_tgt_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	u64 association_id = nvmet_fc_getassociationid(connection_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	u16 qid = nvmet_fc_getqueueid(connection_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	if (qid > NVMET_NR_QUEUES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		if (association_id == assoc->association_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			queue = assoc->queues[qid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			if (queue &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			    (!atomic_read(&queue->connected) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			     !nvmet_fc_tgt_q_get(queue)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 				queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			return queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) nvmet_fc_hostport_free(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	struct nvmet_fc_hostport *hostport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		container_of(ref, struct nvmet_fc_hostport, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	list_del(&hostport->host_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	if (tgtport->ops->host_release && hostport->invalid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		tgtport->ops->host_release(hostport->hosthandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	kfree(hostport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	nvmet_fc_tgtport_put(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	kref_put(&hostport->ref, nvmet_fc_hostport_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	return kref_get_unless_zero(&hostport->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	/* if the LLDD didn't provide a hosthandle, there is nothing to release */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	if (!hostport || !hostport->hosthandle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	nvmet_fc_hostport_put(hostport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
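/*
 * Find an existing hostport matching @hosthandle (taking a reference on
 * it) or add a new one. Because the allocation may sleep, a candidate
 * hostport is allocated before taking tgtport->lock and the host_list is
 * then re-scanned under the lock; if another thread raced in the same
 * hosthandle, the candidate is freed and the existing entry is returned.
 */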
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static struct nvmet_fc_hostport *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	struct nvmet_fc_hostport *newhost, *host, *match = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	/* if the LLDD doesn't use hosthandles, leave hostport as NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	if (!hosthandle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	/* take reference for what will be the newly allocated hostport */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (!nvmet_fc_tgtport_get(tgtport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	if (!newhost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		list_for_each_entry(host, &tgtport->host_list, host_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			if (host->hosthandle == hosthandle && !host->invalid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 				if (nvmet_fc_hostport_get(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 					match = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		/* no allocation - release reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		nvmet_fc_tgtport_put(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		return (match) ? match : ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	newhost->tgtport = tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	newhost->hosthandle = hosthandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	INIT_LIST_HEAD(&newhost->host_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	kref_init(&newhost->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	list_for_each_entry(host, &tgtport->host_list, host_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		if (host->hosthandle == hosthandle && !host->invalid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			if (nvmet_fc_hostport_get(host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				match = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	if (match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		kfree(newhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		newhost = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		/* releasing allocation - release reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		nvmet_fc_tgtport_put(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		list_add_tail(&newhost->host_list, &tgtport->host_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	return (match) ? match : newhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) nvmet_fc_delete_assoc(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct nvmet_fc_tgt_assoc *assoc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	nvmet_fc_delete_target_assoc(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	nvmet_fc_tgt_a_put(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static struct nvmet_fc_tgt_assoc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	u64 ran;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	bool needrandom = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	if (!assoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		goto out_free_assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	if (!nvmet_fc_tgtport_get(tgtport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		goto out_ida;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	if (IS_ERR(assoc->hostport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	assoc->tgtport = tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	assoc->a_id = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	INIT_LIST_HEAD(&assoc->a_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	kref_init(&assoc->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	atomic_set(&assoc->terminating, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
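	/*
	 * Pick an association id: random high-order bytes with the
	 * low-order bytes (reserved for the queue id within a
	 * connection id) left zero, retrying under tgtport->lock until
	 * the value is unique among this targetport's associations.
	 */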
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	while (needrandom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		ran = ran << BYTES_FOR_QID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		needrandom = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 			if (ran == tmpassoc->association_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 				needrandom = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		if (!needrandom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			assoc->association_id = ran;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	return assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	nvmet_fc_tgtport_put(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) out_ida:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	ida_simple_remove(&tgtport->assoc_cnt, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) out_free_assoc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	kfree(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) nvmet_fc_target_assoc_free(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	struct nvmet_fc_tgt_assoc *assoc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	struct nvmet_fc_ls_iod	*oldls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	/* Send Disconnect now that all I/O has completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	nvmet_fc_xmt_disconnect_assoc(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	nvmet_fc_free_hostport(assoc->hostport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	list_del(&assoc->a_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	oldls = assoc->rcv_disconn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	/* if pending Rcv Disconnect Association LS, send rsp now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	if (oldls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	dev_info(tgtport->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		"{%d:%d} Association freed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		tgtport->fc_target_port.port_num, assoc->a_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	kfree(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	nvmet_fc_tgtport_put(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	return kref_get_unless_zero(&assoc->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
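/*
 * Tear down an association exactly once (guarded by the atomic
 * 'terminating' exchange): delete every live queue from the highest
 * qid down, so the admin queue (qid 0) is the last to go, then drop
 * a reference on the association.
 */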
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	struct nvmet_fc_tgt_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	int i, terminating;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	terminating = atomic_xchg(&assoc->terminating, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	/* if already terminating, do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	if (terminating)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		queue = assoc->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		if (queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			if (!nvmet_fc_tgt_q_get(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			nvmet_fc_delete_target_queue(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 			nvmet_fc_tgt_q_put(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	dev_info(tgtport->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		"{%d:%d} Association deleted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		tgtport->fc_target_port.port_num, assoc->a_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	nvmet_fc_tgt_a_put(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) static struct nvmet_fc_tgt_assoc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 				u64 association_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	struct nvmet_fc_tgt_assoc *assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	struct nvmet_fc_tgt_assoc *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		if (association_id == assoc->association_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 			ret = assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 			if (!nvmet_fc_tgt_a_get(assoc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 				ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			struct nvmet_fc_port_entry *pe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			struct nvmet_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	lockdep_assert_held(&nvmet_fc_tgtlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	pe->tgtport = tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	tgtport->pe = pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	pe->port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	port->priv = pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	pe->node_name = tgtport->fc_target_port.node_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	pe->port_name = tgtport->fc_target_port.port_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	INIT_LIST_HEAD(&pe->pe_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	if (pe->tgtport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		pe->tgtport->pe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	list_del(&pe->pe_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  * called when a targetport deregisters. Breaks the relationship
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  * with the nvmet port, but leaves the port_entry in place so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  * re-registration can resume operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	struct nvmet_fc_port_entry *pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	pe = tgtport->pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (pe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		pe->tgtport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	tgtport->pe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  * called when a new targetport is registered. Looks in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)  * existing nvmet port_entries to see if the nvmet layer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)  * configured for the targetport's WWNs (i.e. the targetport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)  * existed before, nvmet was configured for it, and the lldd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)  * having unregistered the tgtport, is now re-registering the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)  * targetport). If so, set the nvmet port entry on the targetport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	struct nvmet_fc_port_entry *pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		if (tgtport->fc_target_port.node_name == pe->node_name &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		    tgtport->fc_target_port.port_name == pe->port_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			WARN_ON(pe->tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			tgtport->pe = pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			pe->tgtport = tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)  * nvmet_fc_register_targetport - transport entry point called by an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  *                              LLDD to register the existence of a local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  *                              NVME subsystem FC port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  * @pinfo:     pointer to information about the port to be registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * @template:  LLDD entrypoints and operational parameters for the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * @dev:       physical hardware device node the port corresponds to. Will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  *             used for DMA mappings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  * @portptr:   pointer to a target port pointer. Upon success, the routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  *             will allocate a nvmet_fc_target_port structure and place its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  *             address in the target port pointer. Upon failure, the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  *             port pointer will be set to NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)  * a completion status. Must be 0 upon success; a negative errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)  * (ex: -ENXIO) upon failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			struct nvmet_fc_target_template *template,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 			struct nvmet_fc_target_port **portptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	struct nvmet_fc_tgtport *newrec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	int ret, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	if (!template->xmt_ls_rsp || !template->fcp_op ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	    !template->fcp_abort ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	    !template->fcp_req_release || !template->targetport_delete ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	    !template->max_hw_queues || !template->max_sgl_segments ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	    !template->max_dif_sgl_segments || !template->dma_boundary) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		goto out_regtgt_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	if (!newrec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		goto out_regtgt_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	if (idx < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		goto out_fail_kfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	if (!get_device(dev) && dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		goto out_ida_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	newrec->fc_target_port.node_name = pinfo->node_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	newrec->fc_target_port.port_name = pinfo->port_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	if (template->target_priv_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		newrec->fc_target_port.private = &newrec[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		newrec->fc_target_port.private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	newrec->fc_target_port.port_id = pinfo->port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	newrec->fc_target_port.port_num = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	INIT_LIST_HEAD(&newrec->tgt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	newrec->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	newrec->ops = template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	spin_lock_init(&newrec->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	INIT_LIST_HEAD(&newrec->ls_rcv_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	INIT_LIST_HEAD(&newrec->ls_req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	INIT_LIST_HEAD(&newrec->ls_busylist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	INIT_LIST_HEAD(&newrec->assoc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	INIT_LIST_HEAD(&newrec->host_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	kref_init(&newrec->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	ida_init(&newrec->assoc_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	newrec->max_sg_cnt = template->max_sgl_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		goto out_free_newrec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	nvmet_fc_portentry_rebind_tgt(newrec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	*portptr = &newrec->fc_target_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) out_free_newrec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) out_ida_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) out_fail_kfree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	kfree(newrec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) out_regtgt_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	*portptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
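
/*
 * Illustrative sketch only (not part of this driver): roughly how an
 * LLDD might call nvmet_fc_register_targetport(). All lldd_* names and
 * the numeric limits are hypothetical; only the template/port-info
 * fields and the call signature come from this file.
 *
 *	static struct nvmet_fc_target_template lldd_tgt_template = {
 *		.targetport_delete	= lldd_targetport_delete,
 *		.xmt_ls_rsp		= lldd_xmt_ls_rsp,
 *		.fcp_op			= lldd_fcp_op,
 *		.fcp_abort		= lldd_fcp_abort,
 *		.fcp_req_release	= lldd_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 64,
 *		.max_dif_sgl_segments	= 64,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.target_priv_sz		= sizeof(struct lldd_tgt_priv),
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= lldd_wwnn,
 *		.port_name	= lldd_wwpn,
 *		.port_id	= lldd_d_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
 *					   lldd_dev, &targetport);
 */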
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) nvmet_fc_free_tgtport(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	struct nvmet_fc_tgtport *tgtport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		container_of(ref, struct nvmet_fc_tgtport, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	struct device *dev = tgtport->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	list_del(&tgtport->tgt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	nvmet_fc_free_ls_iodlist(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	/* let the LLDD know we've finished tearing it down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	tgtport->ops->targetport_delete(&tgtport->fc_target_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	ida_simple_remove(&nvmet_fc_tgtport_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 			tgtport->fc_target_port.port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	ida_destroy(&tgtport->assoc_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	kfree(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	return kref_get_unless_zero(&tgtport->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	struct nvmet_fc_tgt_assoc *assoc, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	list_for_each_entry_safe(assoc, next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 				&tgtport->assoc_list, a_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		if (!nvmet_fc_tgt_a_get(assoc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		if (!schedule_work(&assoc->del_work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			/* already deleting - release local reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			nvmet_fc_tgt_a_put(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)  * nvmet_fc_invalidate_host - transport entry point called by an LLDD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)  *                       to remove references to a hosthandle for LS's.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)  * The nvmet-fc layer ensures that any references to the hosthandle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)  * on the targetport are forgotten (set to NULL).  The LLDD will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)  * typically call this when a login with a remote host port has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)  * lost, thus LS's for the remote host port are no longer possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)  * If an LS request is outstanding to the targetport/hosthandle (or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)  * issued concurrently with the call to invalidate the host), the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)  * LLDD is responsible for terminating/aborting the LS and completing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)  * the LS request. It is recommended that these terminations/aborts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)  * occur after the call to invalidate the host handle, to avoid additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)  * retries by the nvmet-fc transport. The nvmet-fc transport may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)  * continue to reference the host handle while it cleans up outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)  * NVME associations. The nvmet-fc transport will call the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)  * ops->host_release() callback to notify the LLDD that all references
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)  * are complete and the related host handle can be recovered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)  * Note: if there are no references, the callback may be called before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)  * the invalidate host call returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)  * @target_port: pointer to the (registered) target port that a prior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)  *              LS was received on and which supplied the transport the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)  *              hosthandle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)  * @hosthandle: the handle (pointer) that represents the host port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)  *              that no longer has connectivity and that LS's should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)  *              no longer be directed to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			void *hosthandle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	struct nvmet_fc_tgt_assoc *assoc, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	bool noassoc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	list_for_each_entry_safe(assoc, next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 				&tgtport->assoc_list, a_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		if (!assoc->hostport ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		    assoc->hostport->hosthandle != hosthandle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		if (!nvmet_fc_tgt_a_get(assoc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		assoc->hostport->invalid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		noassoc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		if (!schedule_work(&assoc->del_work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			/* already deleting - release local reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			nvmet_fc_tgt_a_put(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	/* if there's nothing to wait for - call the callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	if (noassoc && tgtport->ops->host_release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		tgtport->ops->host_release(hosthandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
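
/*
 * Illustrative sketch only (hypothetical LLDD code): on losing the login
 * with a remote host port, an LLDD that supplied a hosthandle would
 * typically call:
 *
 *	nvmet_fc_invalidate_host(lldd_port->targetport,
 *				 lldd_rport->hosthandle);
 *
 * and, if it implements ops->host_release, defer freeing anything the
 * hosthandle refers to until that callback has run.
 */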
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)  * nvmet layer has called to terminate an association
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	struct nvmet_fc_tgtport *tgtport, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	struct nvmet_fc_tgt_assoc *assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	struct nvmet_fc_tgt_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	bool found_ctrl = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	/* this is a bit ugly, but don't want to make locks layered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			tgt_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		if (!nvmet_fc_tgtport_get(tgtport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 			queue = assoc->queues[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 			if (queue && queue->nvme_sq.ctrl == ctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 				if (nvmet_fc_tgt_a_get(assoc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 					found_ctrl = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		nvmet_fc_tgtport_put(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		if (found_ctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 			if (!schedule_work(&assoc->del_work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 				/* already deleting - release local reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 				nvmet_fc_tgt_a_put(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  * nvmet_fc_unregister_targetport - transport entry point called by an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  *                              LLDD to deregister/remove a previously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)  *                              registered local FC-NVME subsystem target port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)  * @target_port: pointer to the (registered) target port that is to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)  *               deregistered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)  * a completion status. Must be 0 upon success; a negative errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)  * (ex: -ENXIO) upon failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	nvmet_fc_portentry_unbind_tgt(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	/* terminate any outstanding associations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	__nvmet_fc_free_assocs(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	 * LS's should be terminated as well. However, LS's are generated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	 * at the tail end of association termination, so they likely don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	 * exist yet; even if they do, it's worthwhile to just let them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	 * finish, as targetport ref counting will clean things up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	nvmet_fc_tgtport_put(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /* ********************** FC-NVME LS RCV Handling ************************* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			struct nvmet_fc_ls_iod *iod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	struct nvmet_fc_tgt_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	memset(acc, 0, sizeof(*acc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	 * The FC-NVME spec changed; initiators send different lengths
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	 * because the padding size for the Create Association Cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	 * descriptor was originally specified incorrectly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	 * Accept anything of at least the "minimum" length: assume the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	 * format per the 1.15 spec (with HOSTID reduced to 16 bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	 * and ignore the length of the trailing pad.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		ret = VERR_CR_ASSOC_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	else if (be32_to_cpu(rqst->desc_list_len) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		ret = VERR_CR_ASSOC_RQST_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	else if (rqst->assoc_cmd.desc_tag !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		ret = VERR_CR_ASSOC_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		ret = VERR_CR_ASSOC_CMD_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	else if (!rqst->assoc_cmd.ersp_ratio ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 				be16_to_cpu(rqst->assoc_cmd.sqsize)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		ret = VERR_ERSP_RATIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		/* new association w/ admin queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		iod->assoc = nvmet_fc_alloc_target_assoc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 						tgtport, iod->hosthandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		if (!iod->assoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 			ret = VERR_ASSOC_ALLOC_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 					be16_to_cpu(rqst->assoc_cmd.sqsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			if (!queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 				ret = VERR_QUEUE_ALLOC_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		dev_err(tgtport->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			"Create Association LS failed: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 			validation_errors[ret]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 				sizeof(*acc), rqst->w0.ls_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 				FCNVME_RJT_RC_LOGIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 				FCNVME_RJT_EXP_NONE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	atomic_set(&queue->connected, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	queue->sqhd = 0;	/* best place to init value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	dev_info(tgtport->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		"{%d:%d} Association created\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		tgtport->fc_target_port.port_num, iod->assoc->a_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	/* format a response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	iod->lsrsp->rsplen = sizeof(*acc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			fcnvme_lsdesc_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 				sizeof(struct fcnvme_ls_cr_assoc_acc)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			FCNVME_LS_CREATE_ASSOCIATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	acc->associd.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			fcnvme_lsdesc_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 				sizeof(struct fcnvme_lsdesc_assoc_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	acc->associd.association_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	acc->connectid.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 			fcnvme_lsdesc_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 				sizeof(struct fcnvme_lsdesc_conn_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	acc->connectid.connection_id = acc->associd.association_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
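/*
 * Worked note on the desc_len values set above (a sketch, assuming
 * fcnvme_lsdesc_len() in include/linux/nvme-fc.h subtracts the two
 * u32 header words from the structure size): for
 * struct fcnvme_lsdesc_assoc_id - desc_tag(4) + desc_len(4) plus a
 * 64-bit association_id - the field becomes 16 - 8 = 8; that is, a
 * descriptor's length counts its payload bytes only, not its header.
 */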
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			struct nvmet_fc_ls_iod *iod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	struct nvmet_fc_tgt_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	memset(acc, 0, sizeof(*acc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		ret = VERR_CR_CONN_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	else if (rqst->desc_list_len !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			fcnvme_lsdesc_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 				sizeof(struct fcnvme_ls_cr_conn_rqst)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		ret = VERR_CR_CONN_RQST_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		ret = VERR_ASSOC_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	else if (rqst->associd.desc_len !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 			fcnvme_lsdesc_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 				sizeof(struct fcnvme_lsdesc_assoc_id)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		ret = VERR_ASSOC_ID_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	else if (rqst->connect_cmd.desc_tag !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		ret = VERR_CR_CONN_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	else if (rqst->connect_cmd.desc_len !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 			fcnvme_lsdesc_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		ret = VERR_CR_CONN_CMD_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	else if (!rqst->connect_cmd.ersp_ratio ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 				be16_to_cpu(rqst->connect_cmd.sqsize)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		ret = VERR_ERSP_RATIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		/* new io queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 				be64_to_cpu(rqst->associd.association_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		if (!iod->assoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 			ret = VERR_NO_ASSOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 			queue = nvmet_fc_alloc_target_queue(iod->assoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 					be16_to_cpu(rqst->connect_cmd.qid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 					be16_to_cpu(rqst->connect_cmd.sqsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 			if (!queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 				ret = VERR_QUEUE_ALLOC_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			/* release get taken in nvmet_fc_find_target_assoc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			nvmet_fc_tgt_a_put(iod->assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		dev_err(tgtport->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			"Create Connection LS failed: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			validation_errors[ret]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 				sizeof(*acc), rqst->w0.ls_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 				(ret == VERR_NO_ASSOC) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 					FCNVME_RJT_RC_INV_ASSOC :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 					FCNVME_RJT_RC_LOGIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 				FCNVME_RJT_EXP_NONE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	atomic_set(&queue->connected, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	queue->sqhd = 0;	/* best place to init value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	/* format a response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	iod->lsrsp->rsplen = sizeof(*acc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			FCNVME_LS_CREATE_CONNECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	acc->connectid.desc_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 			fcnvme_lsdesc_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 				sizeof(struct fcnvme_lsdesc_conn_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	acc->connectid.connection_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 				be16_to_cpu(rqst->connect_cmd.qid)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)  * Returns true if the LS response is to be transmitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)  * Returns false if the LS response is to be delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 			struct nvmet_fc_ls_iod *iod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 						&iod->rqstbuf->rq_dis_assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	struct fcnvme_ls_disconnect_assoc_acc *acc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 						&iod->rspbuf->rsp_dis_assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	struct nvmet_fc_tgt_assoc *assoc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	struct nvmet_fc_ls_iod *oldls = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	memset(acc, 0, sizeof(*acc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		/* match an active association - takes an assoc ref if !NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		assoc = nvmet_fc_find_target_assoc(tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 				be64_to_cpu(rqst->associd.association_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		iod->assoc = assoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		if (!assoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 			ret = VERR_NO_ASSOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if (ret || !assoc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		dev_err(tgtport->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 			"Disconnect LS failed: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 			validation_errors[ret]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 				sizeof(*acc), rqst->w0.ls_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 				(ret == VERR_NO_ASSOC) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 					FCNVME_RJT_RC_INV_ASSOC :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 					FCNVME_RJT_RC_LOGIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 				FCNVME_RJT_EXP_NONE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	/* format a response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	iod->lsrsp->rsplen = sizeof(*acc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			fcnvme_lsdesc_len(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			FCNVME_LS_DISCONNECT_ASSOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	/* release get taken in nvmet_fc_find_target_assoc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	nvmet_fc_tgt_a_put(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	 * The rules for LS responses say the response cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	 * go back until ABTS's have been sent for all outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	 * I/O and a Disconnect Association LS has been sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	 * So... save off the Disconnect LS to send the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	 * later. If there was a prior LS already saved, replace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	 * it with the newer one and send a can't perform reject
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	 * on the older one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	spin_lock_irqsave(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	oldls = assoc->rcv_disconn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	assoc->rcv_disconn = iod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	spin_unlock_irqrestore(&tgtport->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	nvmet_fc_delete_target_assoc(assoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	if (oldls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		dev_info(tgtport->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 			"{%d:%d} Multiple Disconnect Association LS's "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 			"received\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 			tgtport->fc_target_port.port_num, assoc->a_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		/* overwrite good response with bogus failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 						sizeof(*iod->rspbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 						/* ok to use rqst, LS is same */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 						rqst->w0.ls_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 						FCNVME_RJT_RC_UNAB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 						FCNVME_RJT_EXP_NONE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
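/*
 * Illustrative sequencing of the deferred Disconnect handling above:
 *
 *	rcv Disconnect LS     -> lsrsp saved in assoc->rcv_disconn
 *	delete_target_assoc() -> ABTS's issued for outstanding I/O,
 *				 queues torn down
 *	association freed     -> saved lsrsp finally transmitted
 *
 * Should a second Disconnect arrive in the meantime, it displaces the
 * saved one (the oldls path above) and the older LS is completed
 * immediately with an FCNVME_RJT_RC_UNAB reject.
 */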
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /* *********************** NVME Ctrl Routines **************************** */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	nvmet_fc_free_ls_iod(tgtport, iod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	nvmet_fc_tgtport_put(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 				struct nvmet_fc_ls_iod *iod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 				  sizeof(*iod->rspbuf), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)  * Actual processing routine for received FC-NVME LS Requests from the LLDD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 			struct nvmet_fc_ls_iod *iod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	bool sendrsp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	iod->lsrsp->nvme_fc_private = iod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	iod->lsrsp->rspbuf = iod->rspbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	iod->lsrsp->rspdma = iod->rspdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	/* Be preventive; handlers will later set to a valid length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	iod->lsrsp->rsplen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	iod->assoc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	 * handlers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	 *   parse request input, execute the request, and format the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	 *   LS response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	switch (w0->ls_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	case FCNVME_LS_CREATE_ASSOCIATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		/* Creates Association and initial Admin Queue/Connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		nvmet_fc_ls_create_association(tgtport, iod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	case FCNVME_LS_CREATE_CONNECTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		/* Creates an IO Queue/Connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		nvmet_fc_ls_create_connection(tgtport, iod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	case FCNVME_LS_DISCONNECT_ASSOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		/* Terminate a Queue/Connection or the Association */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 				sizeof(*iod->rspbuf), w0->ls_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	if (sendrsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		nvmet_fc_xmt_ls_rsp(tgtport, iod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)  * Work-queue handler: dispatches a received FC-NVME LS Request for processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	struct nvmet_fc_ls_iod *iod =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		container_of(work, struct nvmet_fc_ls_iod, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	nvmet_fc_handle_ls_rqst(tgtport, iod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)  * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)  *                       upon the reception of an NVME LS request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)  * The nvmet-fc layer will copy payload to an internal structure for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)  * processing.  As such, upon completion of the routine, the LLDD may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)  * immediately free/reuse the LS request buffer passed in the call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)  * If this routine returns error, the LLDD should abort the exchange.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)  * @target_port: pointer to the (registered) target port the LS was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)  *              received on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)  * @hosthandle: LLDD handle identifying the host port that sent the LS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)  * @lsrsp:      pointer to an lsrsp structure used to reference the LS exchange.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)  * @lsreqbuf:   pointer to the buffer containing the LS Request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)  * @lsreqbuf_len: length, in bytes, of the received LS request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 			void *hosthandle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			struct nvmefc_ls_rsp *lsrsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			void *lsreqbuf, u32 lsreqbuf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	struct nvmet_fc_ls_iod *iod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		dev_info(tgtport->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 			"RCV %s LS failed: payload too large (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 				nvmefc_ls_names[w0->ls_cmd] : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 			lsreqbuf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	if (!nvmet_fc_tgtport_get(tgtport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		dev_info(tgtport->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 			"RCV %s LS failed: target deleting\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 				nvmefc_ls_names[w0->ls_cmd] : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		return -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	iod = nvmet_fc_alloc_ls_iod(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	if (!iod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		dev_info(tgtport->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 			"RCV %s LS failed: context allocation failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 				nvmefc_ls_names[w0->ls_cmd] : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		nvmet_fc_tgtport_put(tgtport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	iod->lsrsp = lsrsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	iod->fcpreq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	iod->rqstdatalen = lsreqbuf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	iod->hosthandle = hosthandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	schedule_work(&iod->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
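/*
 * Illustrative sketch, not part of the driver: a minimal LLDD-side
 * call into nvmet_fc_rcv_ls_req() on reception of an LS frame. The
 * lldd_exch structure, lldd_recv_ls() and lldd_abort_exch() are
 * hypothetical names; the embedded nvmefc_ls_rsp is what the
 * transport later hands back through ops->xmt_ls_rsp().
 *
 *	static void lldd_recv_ls(struct lldd_exch *xchg,
 *			void *payload, u32 len)
 *	{
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_ls_req(xchg->targetport,
 *				xchg->hosthandle, &xchg->lsrsp,
 *				payload, len);
 *		if (ret)
 *			lldd_abort_exch(xchg);
 *	}
 *
 * On success the payload has already been copied, so the LLDD may
 * immediately reuse the request buffer; the response goes out later
 * when the transport calls ops->xmt_ls_rsp() with &xchg->lsrsp.
 */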
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)  * **********************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)  * Start of FCP handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)  * **********************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	unsigned int nent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	if (!sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	fod->data_sg = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	fod->data_sg_cnt = nent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 				((fod->io_dir == NVMET_FCP_WRITE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 				/* note: write from initiator perspective */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	fod->next_sg = fod->data_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	return NVME_SC_INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	if (!fod->data_sg || !fod->data_sg_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 				((fod->io_dir == NVMET_FCP_WRITE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	sgl_free(fod->data_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	fod->data_sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	fod->data_sg_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
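/*
 * Worked note on the DMA directions in the two helpers above (a
 * sketch of the convention, no new behavior): io_dir is named from
 * the initiator's perspective while the mapping serves the target's
 * HBA, so the directions cross over:
 *
 *	NVMET_FCP_WRITE: host writes, HBA DMAs the received data
 *		into memory   => DMA_FROM_DEVICE
 *	otherwise (reads): target sends, HBA DMAs the data out of
 *		memory        => DMA_TO_DEVICE
 */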
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	u32 sqtail, used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	/* egad, this is ugly. And sqtail is just a best guess */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	sqtail = atomic_read(&q->sqtail) % q->sqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
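/*
 * Worked example for queue_90percent_full() (illustrative numbers):
 * with q->sqsize = 32, sqhd = 10 and a best-guess sqtail of 8, the
 * ring has wrapped, so used = 8 + 32 - 10 = 30. The test stays in
 * integer math against the sqsize - 1 usable entries:
 * 30 * 10 = 300 >= 31 * 9 = 279, so the queue counts as at least
 * 90% full and the caller will force a full ERSP to refresh SQHD.
 */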
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)  * Prep RSP payload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)  * May be an NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 				struct nvmet_fc_fcp_iod *fod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	struct nvme_completion *cqe = &ersp->cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	u32 *cqewd = (u32 *)cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	bool send_ersp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	u32 rsn, rspcnt, xfr_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		xfr_length = fod->req.transfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		xfr_length = fod->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	 * check to see if we can send a 0's rsp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	 *   Note: to send a 0's response, the NVME-FC host transport will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	 *   seen in an ersp), and command_id. Thus it will create a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	 *   zero-filled CQE with those known fields filled in. Transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	 *   must send an ersp for any condition where the cqe won't match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	 *   this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	 * Here are the FC-NVME mandated cases where we must send an ersp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	 *  every N responses, where N=ersp_ratio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	 *  force fabric commands to send ersp's (not in FC-NVME but good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	 *    practice)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	 *  normal cmds: any time status is non-zero, or status is zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	 *     but words 0 or 1 are non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	 *  the SQ is 90% or more full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	 *  the cmd is a fused command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	 *  transferred data length not equal to cmd iu length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	if (!(rspcnt % fod->queue->ersp_ratio) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	    nvme_is_fabrics((struct nvme_command *) sqe) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	    xfr_length != fod->req.transfer_len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		send_ersp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	/* re-set the fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	fod->fcpreq->rspaddr = ersp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	fod->fcpreq->rspdma = fod->rspdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	if (!send_ersp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		rsn = atomic_inc_return(&fod->queue->rsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		ersp->rsn = cpu_to_be32(rsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		ersp->xfrd_len = cpu_to_be32(xfr_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		fod->fcpreq->rsplen = sizeof(*ersp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
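/*
 * Illustrative numbers for the ERSP decision above: with an
 * ersp_ratio of 16, rspcnt values 16, 32, 48, ... make
 * !(rspcnt % ersp_ratio) true, so at least every 16th completion
 * carries a full ERSP (refreshing SQHD on the host); other clean,
 * fully-transferred completions can go back as the short
 * NVME_FC_SIZEOF_ZEROS_RSP payload that the host transport
 * reconstructs into a zero-filled CQE as described above.
 */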
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 				struct nvmet_fc_fcp_iod *fod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	/* data no longer needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	nvmet_fc_free_tgt_pgs(fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	 * if an ABTS was received or we issued the fcp_abort early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	 * don't call abort routine again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	/* no need to take lock - lock was taken earlier to get here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	if (!fod->aborted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	nvmet_fc_free_fcp_iod(fod->queue, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 				struct nvmet_fc_fcp_iod *fod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	fod->fcpreq->op = NVMET_FCOP_RSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	fod->fcpreq->timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	nvmet_fc_prep_fcp_rsp(tgtport, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		nvmet_fc_abort_op(tgtport, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 				struct nvmet_fc_fcp_iod *fod, u8 op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	struct scatterlist *sg = fod->next_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	u32 remaininglen = fod->req.transfer_len - fod->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	u32 tlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	fcpreq->op = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	fcpreq->offset = fod->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	 * for next sequence:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	 *  break at a sg element boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	 *  attempt to keep sequence length capped at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	 *    be longer if a single sg element is larger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	 *    than that amount. This is done to avoid creating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	 *    a new sg list to use for the tgtport api.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	fcpreq->sg = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	fcpreq->sg_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	while (tlen < remaininglen &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		fcpreq->sg_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		tlen += sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		fcpreq->sg_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	if (tlen < remaininglen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		fod->next_sg = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		fod->next_sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	fcpreq->transfer_length = tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	fcpreq->transferred_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	fcpreq->fcp_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	fcpreq->rsplen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	 * If the last READDATA request: check if LLDD supports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	 * combined xfr with response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	if ((op == NVMET_FCOP_READDATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		fcpreq->op = NVMET_FCOP_READDATA_RSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		nvmet_fc_prep_fcp_rsp(tgtport, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		 * should be ok to set w/o lock as it's in the thread of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		 * execution (not an async timer routine) and doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		 * contend with any clearing action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		fod->abort = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		if (op == NVMET_FCOP_WRITEDATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 			spin_lock_irqsave(&fod->flock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 			fod->writedataactive = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 			spin_unlock_irqrestore(&fod->flock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 			fcpreq->fcp_error = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 			fcpreq->transferred_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
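/*
 * Worked example of the sequence sizing above (illustrative; assumes
 * NVMET_FC_MAX_SEQ_LENGTH of 256KB and 64KB dma-mapped sg elements):
 * the while loop admits an element only while tlen plus its length
 * stays strictly below the cap, so it takes three 64KB elements
 * (192KB) and stops, leaving fod->next_sg at the fourth. Were the
 * first element a single 512KB mapping instead, the loop would admit
 * nothing and the following if would send that one oversized element
 * whole, avoiding having to split an sg entry for the tgtport api.
 */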
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	/* if in the middle of an io and we need to tear down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	if (abort) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		nvmet_fc_abort_op(tgtport, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)  * actual done handler for FCP operations when completed by the lldd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	bool abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	spin_lock_irqsave(&fod->flock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	abort = fod->abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	fod->writedataactive = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	spin_unlock_irqrestore(&fod->flock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	switch (fcpreq->op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	case NVMET_FCOP_WRITEDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		if (__nvmet_fc_fod_op_abort(fod, abort))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		if (fcpreq->fcp_error ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		    fcpreq->transferred_length != fcpreq->transfer_length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 			spin_lock_irqsave(&fod->flock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 			fod->abort = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 			spin_unlock_irqrestore(&fod->flock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		fod->offset += fcpreq->transferred_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		if (fod->offset != fod->req.transfer_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 			spin_lock_irqsave(&fod->flock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 			fod->writedataactive = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 			spin_unlock_irqrestore(&fod->flock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 			/* transfer the next chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 			nvmet_fc_transfer_fcp_data(tgtport, fod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 						NVMET_FCOP_WRITEDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		/* data transfer complete, resume with nvmet layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		fod->req.execute(&fod->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	case NVMET_FCOP_READDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	case NVMET_FCOP_READDATA_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		if (__nvmet_fc_fod_op_abort(fod, abort))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		if (fcpreq->fcp_error ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		    fcpreq->transferred_length != fcpreq->transfer_length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 			nvmet_fc_abort_op(tgtport, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		/* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 			/* data no longer needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 			nvmet_fc_free_tgt_pgs(fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 			nvmet_fc_free_fcp_iod(fod->queue, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 		fod->offset += fcpreq->transferred_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		if (fod->offset != fod->req.transfer_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 			/* transfer the next chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 			nvmet_fc_transfer_fcp_data(tgtport, fod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 						NVMET_FCOP_READDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		/* data transfer complete, send response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		/* data no longer needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		nvmet_fc_free_tgt_pgs(fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 		nvmet_fc_xmt_fcp_rsp(tgtport, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	case NVMET_FCOP_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		if (__nvmet_fc_fod_op_abort(fod, abort))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		nvmet_fc_free_fcp_iod(fod->queue, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	nvmet_fc_fod_op_done(fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)  * actual completion handler after execution by the nvmet layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 			struct nvmet_fc_fcp_iod *fod, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	bool abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	spin_lock_irqsave(&fod->flock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	abort = fod->abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	spin_unlock_irqrestore(&fod->flock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	/* if we have a CQE, snoop the last sq_head value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		fod->queue->sqhd = cqe->sq_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	if (abort) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		nvmet_fc_abort_op(tgtport, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	/* if an error occurred handling the cmd after initial parsing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		/* fudge up a failed CQE status for our transport error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		memset(cqe, 0, sizeof(*cqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		cqe->sq_id = cpu_to_le16(fod->queue->qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 		cqe->command_id = sqe->command_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		cqe->status = cpu_to_le16(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		 * try to push the data even if the SQE status is non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		 * There may be a status for which data was still intended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 		 * to be moved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 			/* push the data over before sending rsp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 			nvmet_fc_transfer_fcp_data(tgtport, fod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 						NVMET_FCOP_READDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		/* writes & no data - fall thru */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	/* data no longer needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	nvmet_fc_free_tgt_pgs(fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)  * Actual processing routine for received FC-NVME I/O Requests from the LLD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 			struct nvmet_fc_fcp_iod *fod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	 * Fused commands are currently not supported in the Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	 * implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	 * As such, the FC transport implementation does not inspect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	 * fused commands, nor does it hold and order their delivery to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	 * the upper layer, based on CSN, until both have been received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 		fod->io_dir = NVMET_FCP_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 		if (!nvme_is_write(&cmdiu->sqe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 			goto transport_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 		fod->io_dir = NVMET_FCP_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 		if (nvme_is_write(&cmdiu->sqe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 			goto transport_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		fod->io_dir = NVMET_FCP_NODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 		if (xfrlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 			goto transport_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	fod->req.cmd = &fod->cmdiubuf.sqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	fod->req.cqe = &fod->rspiubuf.cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	if (tgtport->pe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		fod->req.port = tgtport->pe->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	/* clear any response payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	fod->data_sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	fod->data_sg_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	ret = nvmet_req_init(&fod->req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 				&fod->queue->nvme_cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 				&fod->queue->nvme_sq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 				&nvmet_fc_tgt_fcp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		/* bad SQE content or invalid ctrl state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		/* nvmet layer has already called op done to send rsp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	fod->req.transfer_len = xfrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	/* keep a running counter of tail position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	atomic_inc(&fod->queue->sqtail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	if (fod->req.transfer_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		ret = nvmet_fc_alloc_tgt_pgs(fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 			nvmet_req_complete(&fod->req, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	fod->req.sg = fod->data_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	fod->req.sg_cnt = fod->data_sg_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	fod->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	if (fod->io_dir == NVMET_FCP_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 		/* pull the data over before invoking nvmet layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	 * Reads or no data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	 * The nvmet layer can be invoked now. For read data, command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	 * completion will push the data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	fod->req.execute(&fod->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) transport_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	nvmet_fc_abort_op(tgtport, fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)  *                       upon the reception of an NVME FCP CMD IU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)  * layer for processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)  * The nvmet_fc layer allocates a local job structure (struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)  * nvmet_fc_fcp_iod) from the queue for the io and copies the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)  * CMD IU buffer to the job structure. As such, on a successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)  * completion (returns 0), the LLDD may immediately free/reuse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)  * the CMD IU buffer passed in the call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)  * However, in some circumstances, due to the packetized nature of FC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)  * and the API of the FC LLDD (which may issue a hw command to send the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)  * response but may not receive the hw completion for that command and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)  * upcall the nvmet_fc layer before a new command is asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)  * received), it is possible for a command to be received before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)  * LLDD and nvmet_fc have recycled the job structure. This gives the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)  * appearance of more commands received than fit in the sq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)  * To alleviate this scenario, a temporary queue is maintained in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)  * transport for pending LLDD requests waiting for a queue job structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)  * In these "overrun" cases, a temporary queue element is allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)  * the LLDD request and CMD IU buffer information is remembered, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)  * the routine returns -EOVERFLOW. Subsequently, when a queue job
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)  * structure is freed, it is immediately reallocated for anything on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)  * the pending request list. The LLDD's defer_rcv() callback is called,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)  * informing the LLDD that it may reuse the CMD IU buffer, and the io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)  * is then started normally with the transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)  * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)  * the completion as successful but must not reuse the CMD IU buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)  * until the LLDD's defer_rcv() callback has been called for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)  * corresponding struct nvmefc_tgt_fcp_req pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)  * If there is any other condition in which an error occurs, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)  * transport will return a non-zero status indicating the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)  * In all cases other than -EOVERFLOW, the transport has not accepted the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)  * request and the LLDD should abort the exchange.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)  * @target_port: pointer to the (registered) target port the FCP CMD IU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)  *              was received on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)  * @fcpreq:     pointer to a fcpreq request structure to be used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)  *              reference the exchange for this FCP command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 			struct nvmefc_tgt_fcp_req *fcpreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 			void *cmdiubuf, u32 cmdiubuf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	struct nvmet_fc_tgt_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	struct nvmet_fc_fcp_iod *fod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	struct nvmet_fc_defer_fcp_req *deferfcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	/* validate iu, so the connection id can be used to find the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 			(cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	queue = nvmet_fc_find_target_queue(tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 				be64_to_cpu(cmdiu->connection_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	if (!queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	 * note: a reference was taken by find_target_queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	 * After successful fod allocation, the fod inherits ownership
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	 * of that reference and drops it when the fod is freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	spin_lock_irqsave(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	fod = nvmet_fc_alloc_fcp_iod(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	if (fod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 		spin_unlock_irqrestore(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 		fcpreq->nvmet_fc_private = fod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 		fod->fcpreq = fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	if (!tgtport->ops->defer_rcv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		spin_unlock_irqrestore(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		/* release the queue lookup reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		nvmet_fc_tgt_q_put(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 			struct nvmet_fc_defer_fcp_req, req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	if (deferfcp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 		/* Just re-use one that was previously allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		list_del(&deferfcp->req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		spin_unlock_irqrestore(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		/* Now we need to dynamically allocate one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		if (!deferfcp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 			/* release the queue lookup reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 			nvmet_fc_tgt_q_put(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		spin_lock_irqsave(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	/* For now, use rspaddr / rsplen to save payload information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	fcpreq->rspaddr = cmdiubuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	fcpreq->rsplen  = cmdiubuf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	deferfcp->fcp_req = fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	/* defer processing until a fod becomes available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	/* NOTE: the queue lookup reference is still valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	spin_unlock_irqrestore(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
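/*
 * Illustrative sketch (not part of the transport): how an LLDD's
 * command-receive path might call nvmet_fc_rcv_fcp_req() and honor the
 * return codes documented above. example_lldd_rcv_cmd() is a
 * hypothetical name; only nvmet_fc_rcv_fcp_req() and its
 * 0 / -EOVERFLOW / other-error contract come from this file.
 */
#if 0	/* documentation sketch only - not compiled */
static void
example_lldd_rcv_cmd(struct nvmet_fc_target_port *target_port,
		     struct nvmefc_tgt_fcp_req *fcpreq,
		     void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(target_port, fcpreq,
				   cmdiubuf, cmdiubuf_len);
	switch (ret) {
	case 0:
		/* accepted: the CMD IU buffer may be freed/reused now */
		break;
	case -EOVERFLOW:
		/*
		 * accepted but deferred: treat as success, but keep the
		 * CMD IU buffer valid until defer_rcv() is called for
		 * this fcpreq.
		 */
		break;
	default:
		/* not accepted: the LLDD should abort the exchange */
		break;
	}
}
#endif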
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)  * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)  *                       upon the reception of an ABTS for an FCP command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)  * Notify the transport that an ABTS has been received for an FCP command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)  * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)  * LLDD believes the command is still being worked on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)  * (template_ops->fcp_req_release() has not been called).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)  * The transport will wait for any outstanding work (an op to the LLDD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)  * which the lldd should complete with error due to the ABTS; or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)  * completion from the nvmet layer of the nvme command), then will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)  * stop processing and call the LLDD's fcp_req_release() callback to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)  * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)  * to the ABTS either after return from this function (assuming any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)  * outstanding op work has been terminated) or upon the callback being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)  * called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)  * @target_port: pointer to the (registered) target port the FCP CMD IU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)  *              was received on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)  * @fcpreq:     pointer to the fcpreq request structure that corresponds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)  *              to the exchange that received the ABTS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 			struct nvmefc_tgt_fcp_req *fcpreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	struct nvmet_fc_tgt_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	if (!fod || fod->fcpreq != fcpreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 		/* job appears to have already completed, ignore abort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	queue = fod->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	spin_lock_irqsave(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	if (fod->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		 * mark as aborted. The abort handler, invoked upon completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		 * of any work, will detect the aborted status and do the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		 * callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 		spin_lock(&fod->flock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		fod->abort = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		fod->aborted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 		spin_unlock(&fod->flock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	spin_unlock_irqrestore(&queue->qlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
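/*
 * Illustrative sketch (not part of the transport): a hypothetical LLDD
 * ABTS path matching the contract above. example_lldd_rcv_abts() is a
 * made-up name; the call and the completion/BA_ACC ordering rules come
 * from the comment above.
 */
#if 0	/* documentation sketch only - not compiled */
static void
example_lldd_rcv_abts(struct nvmet_fc_target_port *target_port,
		      struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* tell the transport the command is being aborted */
	nvmet_fc_rcv_fcp_abort(target_port, fcpreq);

	/*
	 * Any hw op still outstanding for this exchange should now be
	 * completed with error (driving fcpreq->done()); the BA_ACC may
	 * be sent after that, or once the i/o context is returned via
	 * the fcp_req_release() callback.
	 */
}
#endif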
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) struct nvmet_fc_traddr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	u64	nn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	u64	pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	u64 token64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	if (match_u64(sstr, &token64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	*val = token64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)  * This routine validates and extracts the WWNs from the TRADDR string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)  * As the kernel parsers need the 0x prefix to determine the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792)  * base, the string to parse is always built with a 0x prefix before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)  * the name strings are parsed.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	substring_t wwn = { name, &name[sizeof(name)-1] };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	int nnoffset, pnoffset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	/* validate that the string is one of the 2 allowed formats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		nnoffset = NVME_FC_TRADDR_OXNNLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 						NVME_FC_TRADDR_OXNNLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 				"pn-", NVME_FC_TRADDR_NNLEN))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		nnoffset = NVME_FC_TRADDR_NNLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 		goto out_einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	name[0] = '0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	name[1] = 'x';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 		goto out_einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		goto out_einval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) out_einval:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	pr_warn("%s: bad traddr string\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) }
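/*
 * The two traddr forms accepted above, shown with arbitrary placeholder
 * WWN values (each name is NVME_FC_TRADDR_HEXNAMELEN hex digits):
 *
 *   nn-0x20000090fac7ca5a:pn-0x10000090fac7ca5a	(0x-prefixed)
 *   nn-20000090fac7ca5a:pn-10000090fac7ca5a		(bare hex)
 */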
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) nvmet_fc_add_port(struct nvmet_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	struct nvmet_fc_tgtport *tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	struct nvmet_fc_port_entry *pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	struct nvmet_fc_traddr traddr = { 0L, 0L };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	/* validate the address info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	/* map the traddr address info to a target port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 			sizeof(port->disc_addr.traddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	if (!pe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 			/* an FC port can be bound to only one nvmet port id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 			if (!tgtport->pe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 				nvmet_fc_portentry_bind(tgtport, pe, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 				ret = -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		kfree(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) }
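/*
 * Illustrative configfs usage (assumed typical nvmet setup, not taken
 * from this file): nvmet_fc_add_port() runs when a nvmet port with
 * trtype "fc" is enabled, and its traddr must match the node/port name
 * of an already-registered targetport, e.g.:
 *
 *   cd /sys/kernel/config/nvmet/ports/1
 *   echo fc > addr_trtype
 *   echo fc > addr_adrfam
 *   echo "nn-0x20000090fac7ca5a:pn-0x10000090fac7ca5a" > addr_traddr
 *
 * The WWNs are placeholders; with no matching targetport registered the
 * bind fails with -ENXIO, and a second bind to the same FC port fails
 * with -EALREADY.
 */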
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) nvmet_fc_remove_port(struct nvmet_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	struct nvmet_fc_port_entry *pe = port->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	nvmet_fc_portentry_unbind(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	kfree(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) nvmet_fc_discovery_chg(struct nvmet_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	struct nvmet_fc_port_entry *pe = port->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	struct nvmet_fc_tgtport *tgtport = pe->tgtport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	if (tgtport && tgtport->ops->discovery_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		tgtport->ops->discovery_event(&tgtport->fc_target_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	.owner			= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	.type			= NVMF_TRTYPE_FC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	.msdbd			= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	.add_port		= nvmet_fc_add_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	.remove_port		= nvmet_fc_remove_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	.delete_ctrl		= nvmet_fc_delete_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	.discovery_chg		= nvmet_fc_discovery_chg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) static int __init nvmet_fc_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) static void __exit nvmet_fc_exit_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	/* sanity check - all targetports should be removed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	if (!list_empty(&nvmet_fc_target_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 		pr_warn("%s: targetport list not empty\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 	ida_destroy(&nvmet_fc_tgtport_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) module_init(nvmet_fc_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) module_exit(nvmet_fc_exit_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) MODULE_LICENSE("GPL v2");