Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

/*
 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
 *
 * Copyright (C) 2003-2015 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 *
 * Written by:	Dimitris Michailidis (dm@chelsio.com)
 *		Karen Xie (kxie@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>

#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"
#include "cxgb3i.h"

static unsigned int dbg_level;
#include "../libcxgbi.h"

#define DRV_MODULE_NAME         "cxgb3i"
#define DRV_MODULE_DESC         "Chelsio T3 iSCSI Driver"
#define DRV_MODULE_VERSION	"2.0.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");

static int cxgb3i_rcv_win = 256 * 1024;
module_param(cxgb3i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");

static int cxgb3i_snd_win = 128 * 1024;
module_param(cxgb3i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");

static int cxgb3i_rx_credit_thres = 10 * 1024;
module_param(cxgb3i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb3i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb3i_max_connect = 8 * 1024;
module_param(cxgb3i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8192)");

static unsigned int cxgb3i_sport_base = 20000;
module_param(cxgb3i_sport_base, uint, 0644);
MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");
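
/*
 * The parameters above can be set at load time, e.g.
 * "modprobe cxgb3i cxgb3i_rcv_win=524288 dbg_level=1" (illustrative
 * values), or changed afterwards via /sys/module/cxgb3i/parameters/,
 * since each is registered with mode 0644.
 */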

static void cxgb3i_dev_open(struct t3cdev *);
static void cxgb3i_dev_close(struct t3cdev *);
static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);

static struct cxgb3_client t3_client = {
	.name = DRV_MODULE_NAME,
	.handlers = cxgb3i_cpl_handlers,
	.add = cxgb3i_dev_open,
	.remove = cxgb3i_dev_close,
	.event_handler = cxgb3i_dev_event_handler,
};

static struct scsi_host_template cxgb3i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB3I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out	= iscsi_eh_cmd_timed_out,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.dma_boundary	= PAGE_SIZE - 1,
	.this_id	= -1,
	.track_queue_depth = 1,
};

static struct iscsi_transport cxgb3i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	/* owner and name should be set already */
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
			  CAP_DATADGST | CAP_DIGEST_OFFLOAD |
			  CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb3i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message-passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      const struct l2t_entry *e)
{
	unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win);
	struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;

	skb->priority = CPL_PRIORITY_SETUP;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
	req->local_port = csk->saddr.sin_port;
	req->peer_port = csk->daddr.sin_port;
	req->local_ip = csk->saddr.sin_addr.s_addr;
	req->peer_ip = csk->daddr.sin_addr.s_addr;

	req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
			V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
			V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
	req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
			V_RCV_BUFSIZ(csk->rcv_win >> 10));

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
		csk, csk->state, csk->flags, csk->atid,
		&req->local_ip, ntohs(req->local_port),
		&req->peer_ip, ntohs(req->peer_port),
		csk->mss_idx, e->idx, e->smt_idx);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}
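
/*
 * Note: opt0l above encodes the receive buffer in 1KB units
 * (rcv_win >> 10), so the default 256KB window is programmed as 256;
 * the window scale advertised in opt0h is derived from the same
 * window by cxgbi_sock_compute_wscale().
 */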

static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	cxgbi_sock_act_open_req_arp_failure(NULL, skb);
}

/*
 * CPL connection close request: host ->
 *
 * Close a connection by sending a CPL_CLOSE_CON_REQ message, queued onto
 * the write queue (i.e., after any unsent TX data).
 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->cpl_close = NULL;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = htonl(csk->write_seq);

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}
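
/*
 * Because the close request is entailed onto the write queue rather
 * than sent directly, it goes out only after all previously queued
 * payload; rsvd carries the current write_seq, presumably so the HW
 * can place the FIN after the last queued byte.
 */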

/*
 * CPL connection abort request: host ->
 *
 * Send an ABORT_REQ message.  Makes sure we do not send multiple ABORT_REQs
 * for the same connection and also that we do not try to send a message
 * after the connection has closed.
 */
static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"t3dev 0x%p, tid %u, skb 0x%p.\n",
		tdev, GET_TID(req), skb);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(tdev, skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_abort_req;
	struct cpl_abort_req *req;

	if (unlikely(csk->state == CTP_ABORTING || !skb))
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	/* Purge the send queue so we don't send anything after an abort. */
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

/*
 * CPL connection abort reply: host ->
 *
 * Send an ABORT_RPL message in response to the ABORT_REQ received.
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	skb->priority = CPL_PRIORITY_DATA;
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.  Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;
	u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
		csk, csk->state, csk->flags, csk->tid, credits, dack);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
	req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
				V_RX_CREDITS(credits));
	skb->priority = CPL_PRIORITY_ACK;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return credits;
}
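
/*
 * On allocation failure send_rx_credits() returns 0 rather than an
 * error: no credits are counted as returned, leaving the caller free
 * to retry on a later pass.
 */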

/*
 * CPL connection tx data: host ->
 *
 * Send iSCSI PDUs via TX_DATA CPL messages.  Returns the number of
 * credits sent.
 * Each TX_DATA consumes work request credits (WRs), so we need to keep track
 * of how many we've used so far and how many are pending (i.e., not yet
 * ACKed by T3).
 */

static unsigned int wrlen __read_mostly;
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;

static void init_wr_tab(unsigned int wr_len)
{
	int i;

	if (skb_wrs[1])		/* already initialized */
		return;
	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
		int sgl_len = (3 * i) / 2 + (i & 1);

		sgl_len += 3;
		skb_wrs[i] = (sgl_len <= wr_len
			      ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
	}
	wrlen = wr_len * 8;
}
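
/*
 * Worked example (illustrative): for i = 4 fragments, sgl_len is
 * (3 * 4) / 2 + (4 & 1) + 3 = 9 flits.  A flit is 8 bytes, which is
 * why wrlen above converts wr_len (in flits) to bytes by multiplying
 * by 8.  With an adapter wr_len of, say, 9 flits, such an skb would
 * need exactly one work request.
 */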

static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int len, int req_completion)
{
	struct tx_data_wr *req;
	struct l2t_entry *l2t = csk->l2t;

	skb_reset_transport_header(skb);
	req = __skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
			(req_completion ? F_WR_COMPL : 0));
	req->wr_lo = htonl(V_WR_TID(csk->tid));
	/* len includes the length of any HW ULP additions */
	req->len = htonl(len);
	/* V_TX_ULP_SUBMODE sets both the mode and submode */
	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_tx_ulp_mode(skb)) |
			   V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
	req->sndseq = htonl(csk->snd_nxt);
	req->param = htonl(V_TX_PORT(l2t->smt_idx));

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
				    V_TX_CPU_IDX(csk->rss_qid));
		/* sendbuffer is in units of 32KB. */
		req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15));
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}
}
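
/*
 * TX_SHOVE is set only when the write queue is empty, i.e. this is
 * the last skb currently queued, hinting the HW to push the data out
 * immediately rather than wait for more to coalesce.
 */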

/*
 * push_tx_frames -- start transmit
 *
 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
 * connection's send queue and sends them on to T3.  Must be called with the
 * connection's lock held.  Returns the amount of send buffer space that was
 * freed as a result of sending queued data to T3.
 */

static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
			log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
				csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int len = skb->len;	/* length before skb_push */
		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
		int wrs_needed = skb_wrs[frags];

		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
			wrs_needed = 1;

		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

		if (csk->wr_cred < wrs_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
				csk, skb->len, skb->data_len, frags,
				wrs_needed, csk->wr_cred);
			break;
		}

		__skb_unlink(skb, &csk->write_queue);
		skb->priority = CPL_PRIORITY_DATA;
		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
		csk->wr_cred -= wrs_needed;
		csk->wr_una_cred += wrs_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
			"left %u, unack %u.\n",
			csk, skb->len, skb->data_len, frags, skb->csum,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			if ((req_completion &&
				csk->wr_una_cred == wrs_needed) ||
			     csk->wr_una_cred >= csk->wr_max_cred / 2) {
				req_completion = 1;
				csk->wr_una_cred = 0;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));
			make_tx_data_wr(csk, skb, len, req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, tid 0x%x, send skb 0x%p.\n",
			csk, csk->tid, skb);
		set_arp_failure_handler(skb, arp_failure_skb_discard);
		l2t_send(csk->cdev->lldev, skb, csk->l2t);
	}
	return total_size;
}
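
/*
 * Credit accounting above: each skb costs skb_wrs[frags] work
 * requests; skb->csum temporarily stores that count until the WR_ACK
 * returns the credits, and a completion is requested once half of
 * wr_max_cred is outstanding so that credits keep flowing back.
 */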

/*
 * Process a CPL_ACT_ESTABLISH message: -> host
 * Updates connection state from an active establish CPL message.  Runs with
 * the connection lock held.
 */

static inline void free_atid(struct cxgbi_sock *csk)
{
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb3_free_atid(csk->cdev->lldev, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		atid, tid, csk, csk->state, csk->flags, rcv_isn);

	cxgbi_sock_get(csk);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
	csk->tid = tid;
	cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);

	free_atid(csk);

	csk->rss_qid = G_QNUM(ntohs(skb->csum));

	spin_lock_bh(&csk->lock);
	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	if (csk->rcv_win > (M_RCV_BUFSIZ << 10))
		csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		/* upper layer has requested closing */
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 1);
		cxgbi_conn_tx_open(csk);
	}

	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}
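
/*
 * rcv_isn as reported by the HW is the real RCV_ISN + 1; it seeds
 * copied_seq, rcv_wup and rcv_nxt above.  If the configured receive
 * window exceeds what opt0l could encode (M_RCV_BUFSIZ in 1KB units),
 * rcv_wup is pulled back so the credit arithmetic stays consistent.
 */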

/*
 * Process a CPL_ACT_OPEN_RPL message: -> host
 * Handle active open failures.
 */
static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

static void act_open_retry_timer(struct timer_list *t)
{
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct sk_buff *skb;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		set_arp_failure_handler(skb, act_open_arp_failure);
		send_act_open_req(csk, skb, csk->l2t);
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags, csk->atid, rpl->status,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	if (rpl->status != CPL_ERR_TCAM_FULL &&
	    rpl->status != CPL_ERR_CONN_EXIST &&
	    rpl->status != CPL_ERR_ARP_MISS)
		cxgb3_queue_tid_release(tdev, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (rpl->status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != act_open_retry_timer) {
		csk->retry_timer.function = act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
				act_open_rpl_status_to_errno(rpl->status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
	return 0;
}
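
/*
 * CPL_ERR_CONN_EXIST is treated as transient: instead of failing the
 * open, do_act_open_rpl() arms act_open_retry_timer() to resend the
 * active-open request half a second (HZ / 2) later.
 */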

/*
 * Process PEER_CLOSE CPL messages: -> host
 * Handle peer FIN.
 */
static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_rcv_peer_close(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process CLOSE_CON_RPL CPL message: -> host
 * Process a peer ACK to our FIN.
 */
static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));

	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_REQ_RSS CPL message: -> host
 * Process abort requests.  If we are waiting for an ABORT_RPL we ignore this
 * request except that we need to reply to it.
 */

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;
	int rst_status = CPL_ABORT_NO_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
		goto done;
	}

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto out;
	}

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

out:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
done:
	__kfree_skb(skb);
	return 0;
}
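
/*
 * do_abort_req() expects ABORT_REQ_RSS to be delivered twice: the
 * first copy only marks the socket CTPF_ABORT_REQ_RCVD and moves it
 * to CTP_ABORTING; the reply, error mapping and teardown happen when
 * the second copy is processed.
 */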
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700)  * Process ABORT_RPL_RSS CPL message: -> host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701)  * Process abort replies.  We only process these messages if we anticipate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702)  * them as the coordination between SW and HW in this area is somewhat lacking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703)  * and sometimes we get ABORT_RPLs after we are done with the connection that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704)  * originated the ABORT_REQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	struct cxgbi_sock *csk = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		"status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		rpl->status, csk, csk ? csk->state : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		csk ? csk->flags : 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	 * Ignore replies to post-close aborts indicating that the abort was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	 * requested too late.  These connections are terminated when we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	 * arrives the TID is either no longer used or it has been recycled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	if (rpl->status == CPL_ERR_ABORT_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		goto rel_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	 * Sometimes we've already closed the connection, e.g., a post-close
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	 * abort races with ABORT_REQ_RSS, the latter frees the connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	 * but FW turns the ABORT_REQ into a regular one and so we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	 * ABORT_RPL_RSS with status 0 and no connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		cxgbi_sock_rcv_abort_rpl(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) rel_skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	__kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738)  * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs.  The payload may have been placed directly into
 * the host buffer (DDP'ed); if not, it follows the BHS in the skb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	struct cxgbi_sock *csk = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	struct cpl_iscsi_hdr_norss data_cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	struct cpl_rx_data_ddp_norss ddp_cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	unsigned int hdr_len, data_len, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		"csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		csk, csk->state, csk->flags, csk->tid, skb, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	spin_lock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 			csk, csk->state, csk->flags, csk->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		if (csk->state != CTP_ABORTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			goto abort_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	cxgbi_skcb_flags(skb) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	skb_reset_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	len = hdr_len = ntohs(hdr_cpl->len);
	/* message coalescing is off or not enough data has been received */
	if (skb->len <= hdr_len) {
		pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u <= %u.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		goto abort_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			    sizeof(ddp_cpl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			csk->cdev->ports[csk->port_id]->name, csk->tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			skb->len, sizeof(ddp_cpl), err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		goto abort_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	status = ntohl(ddp_cpl.ddp_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		"csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 				csk->cdev->ports[csk->port_id]->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 				csk->tid, sizeof(data_cpl), skb->len, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			goto abort_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		data_len = ntohs(data_cpl.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			"skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		len += sizeof(data_cpl) + data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	} else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	__pskb_trim(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	__skb_queue_tail(&csk->receive_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	cxgbi_conn_pdu_ready(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	spin_unlock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) abort_conn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	send_abort_req(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) discard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	spin_unlock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	__kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) }
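
/*
 * For reference, a sketch of the coalesced skb layout that do_iscsi_hdr()
 * parses above in the non-DDP'ed case (offsets taken after the
 * cpl_iscsi_hdr header has been pulled):
 *
 *   offset 0                            iSCSI BHS, hdr_len bytes
 *   offset hdr_len                      struct cpl_iscsi_hdr_norss (data_cpl)
 *   offset hdr_len + sizeof(data_cpl)   PDU payload, ntohs(data_cpl.len) bytes
 *   offset skb->len - sizeof(ddp_cpl)   struct cpl_rx_data_ddp_norss (ddp_cpl)
 *
 * __pskb_trim() then drops the trailing ddp_cpl so that only the BHS,
 * data_cpl and payload are queued for the iscsi layer.
 */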
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) /*
 * Process TX_DMA_ACK CPL messages: -> host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843)  * Process an acknowledgment of WR completion.  Advance snd_una and send the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  * next batch of work requests from the write queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	struct cxgbi_sock *csk = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct cpl_wr_ack *hdr = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		"csk 0x%p,%u,0x%lx,%u, cr %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	__kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) }
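
/*
 * A minimal sketch (not compiled; helper name hypothetical) of the credit
 * accounting cxgbi_sock_rcv_wr_ack() performs in libcxgbi: TX work requests
 * consume csk->wr_cred when posted, and each ack returns credits and
 * advances the acknowledged snd_una mark so more frames can be pushed.
 */
#if 0
static void wr_ack_shape(struct cxgbi_sock *csk, unsigned int credits,
			 unsigned int snd_una)
{
	csk->wr_cred += credits;	/* WR slots handed back by HW */
	if (csk->wr_una_cred >= credits)
		csk->wr_una_cred -= credits;
	else
		csk->wr_una_cred = 0;
	csk->snd_una = snd_una;		/* payload up to snd_una is done */
}
#endif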
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) /*
 * For each connection, pre-allocate the skbs needed for close/abort
 * requests so that those requests can be serviced immediately, even
 * under memory pressure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) static int alloc_cpls(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	if (!csk->cpl_close)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	if (!csk->cpl_abort_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		goto free_cpl_skbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	if (!csk->cpl_abort_rpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		goto free_cpl_skbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) free_cpl_skbs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	cxgbi_sock_free_cpl_skbs(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) }
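
/*
 * Sketch (not compiled; the real send_abort_req() is defined earlier in
 * this file) of how a pre-allocated CPL skb is consumed later: the abort
 * path takes csk->cpl_abort_req and clears the pointer instead of calling
 * alloc_wr() at abort time, so it cannot fail under memory pressure.
 */
#if 0
static void send_abort_req_shape(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_abort_req;

	if (!skb)
		return;
	csk->cpl_abort_req = NULL;
	/* ... fill in the cpl_abort_req fields and hand skb to the core ... */
}
#endif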
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) static void l2t_put(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	if (csk->l2t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		l2t_release(t3dev, csk->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		csk->l2t = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) /*
 * release_offload_resources - release offload resources
 * Release the resources held by an offload connection (TID, L2T entry, etc.).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) static void release_offload_resources(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		"csk 0x%p,%u,0x%lx,%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		csk, csk->state, csk->flags, csk->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	csk->rss_qid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	cxgbi_sock_free_cpl_skbs(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	if (csk->wr_cred != csk->wr_max_cred) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		cxgbi_sock_purge_wr_queue(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		cxgbi_sock_reset_wr_list(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	l2t_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		free_atid(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	csk->dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	csk->cdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) static void update_address(struct cxgbi_hba *chba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	if (chba->ipv4addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		if (chba->vdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		    chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			cxgb3i_set_private_ipv4addr(chba->ndev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			pr_info("%s set %pI4.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 				chba->vdev->name, &chba->ipv4addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		} else if (chba->ipv4addr !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 				cxgb3i_get_private_ipv4addr(chba->ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			pr_info("%s set %pI4.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 				chba->ndev->name, &chba->ipv4addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	} else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		if (chba->vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			cxgb3i_set_private_ipv4addr(chba->vdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		cxgb3i_set_private_ipv4addr(chba->ndev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) static int init_act_open(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	struct dst_entry *dst = csk->dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct cxgbi_device *cdev = csk->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	struct net_device *ndev = cdev->ports[csk->port_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	update_address(chba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	if (chba->ipv4addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		csk->saddr.sin_addr.s_addr = chba->ipv4addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	csk->rss_qid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	csk->l2t = t3_l2t_get(t3dev, dst, ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			      &csk->daddr.sin_addr.s_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (!csk->l2t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		pr_err("NO l2t available.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	cxgbi_sock_get(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
	/* cxgb3_alloc_atid() returns a negative value on failure, but
	 * csk->atid is unsigned; test the signed return before storing it.
	 */
	ret = cxgb3_alloc_atid(t3dev, &t3_client, csk);
	if (ret < 0) {
		pr_err("NO atid available.\n");
		ret = -EINVAL;
		goto put_sock;
	}
	csk->atid = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	cxgbi_sock_get(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		goto free_atid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	skb->sk = (struct sock *)csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	set_arp_failure_handler(skb, act_open_arp_failure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	csk->snd_win = cxgb3i_snd_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	csk->rcv_win = cxgb3i_rcv_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	csk->wr_una_cred = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	cxgbi_sock_reset_wr_list(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	csk->err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		"csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		csk, csk->state, csk->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	send_act_open_req(csk, skb, csk->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) free_atid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	cxgb3_free_atid(t3dev, csk->atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) put_sock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	l2t_release(t3dev, csk->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	csk->l2t = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	[CPL_ACT_ESTABLISH] = do_act_establish,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	[CPL_PEER_CLOSE] = do_peer_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	[CPL_ABORT_REQ_RSS] = do_abort_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	[CPL_ABORT_RPL_RSS] = do_abort_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	[CPL_TX_DMA_ACK] = do_wr_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	[CPL_ISCSI_HDR] = do_iscsi_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) };
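
/*
 * A sketch (not part of this driver) of how the cxgb3 core is expected to
 * use the table above: the CPL opcode from the message's RSS header indexes
 * the array, and the per-connection context registered with the atid/tid is
 * passed back as @ctx.  The helper name and exact lookup are hypothetical;
 * the real dispatch lives in the cxgb3 offload core.
 */
#if 0
static int dispatch_shape(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	u8 opcode = ((struct rss_header *)skb->data)->opcode;

	if (opcode < NUM_CPL_CMDS && cxgb3i_cpl_handlers[opcode])
		return cxgb3i_cpl_handlers[opcode](cdev, skb, ctx);
	__kfree_skb(skb);
	return 0;
}
#endif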
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  * @cdev:	cxgbi adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static int cxgb3i_ofld_init(struct cxgbi_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	struct adap_ports port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct ofld_page_info rx_page_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	unsigned int wr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	    t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	    t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	if (cxgb3i_max_connect > CXGBI_MAX_CONN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		cxgb3i_max_connect = CXGBI_MAX_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 					cxgb3i_max_connect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	init_wr_tab(wr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	cdev->csk_release_offload_resources = release_offload_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	cdev->csk_push_tx_frames = push_tx_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	cdev->csk_send_abort_req = send_abort_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	cdev->csk_send_close_req = send_close_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	cdev->csk_send_rx_credits = send_rx_credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	cdev->csk_alloc_cpls = alloc_cpls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	cdev->csk_init_act_open = init_act_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	pr_info("cdev 0x%p, offload up, added.\n", cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /*
 * Functions to program the pagepods in h/w.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	memset(req, 0, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 				   V_ULPTX_CMD(ULP_MEM_WRITE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			 V_ULPTX_NFLITS((IPPOD_SIZE >> 3) + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
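
/*
 * Worked example of the encoding above, assuming IPPOD_SIZE == 64 (the
 * 64-byte struct cxgbi_pagepod): ULP memory is addressed in 32-byte units,
 * so the destination is written as addr >> 5 and DATA_LEN as 64 >> 5 == 2;
 * the WR body spans 64 >> 3 == 8 eight-byte flits plus 1 flit for the
 * command header, hence NFLITS == 9.
 */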
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	return ((struct t3cdev *)cdev->lldev)->ulp_iscsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		       struct cxgbi_task_tag_info *ttinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	unsigned int idx = ttinfo->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	unsigned int npods = ttinfo->npods;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	struct scatterlist *sg = ttinfo->sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	struct cxgbi_pagepod *ppod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	struct ulp_mem_io *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	unsigned int sg_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 					       IPPOD_SIZE, 0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		ulp_mem_io_set_hdr(skb, pm_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		req = (struct ulp_mem_io *)skb->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		ppod = (struct cxgbi_pagepod *)(req + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		sg_off = i * PPOD_PAGES_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		cxgbi_ddp_set_one_ppod(ppod, ttinfo, &sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 				       &sg_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		skb->priority = CPL_PRIORITY_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		cxgb3_ofld_send(ppm->lldev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
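
/*
 * Example of the address walk above, with hypothetical numbers: for
 * ppm->llimit = 0x100000, idx = 4 and 64-byte pagepods (PPOD_SIZE_SHIFT ==
 * 6), the first write lands at 0x100000 + (4 << 6) = 0x100100, and each of
 * the npods iterations advances pm_addr by IPPOD_SIZE, sending one
 * CPL_PRIORITY_CONTROL skb per pagepod.
 */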
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 			  struct cxgbi_task_tag_info *ttinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	unsigned int idx = ttinfo->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	unsigned int npods = ttinfo->npods;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		  "cdev 0x%p, clear idx %u, npods %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		  cdev, idx, npods);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 					       IPPOD_SIZE, 0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			pr_err("cdev 0x%p, clear ddp, %u,%d/%u, skb OOM.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			       cdev, idx, i, npods);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		ulp_mem_io_set_hdr(skb, pm_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		skb->priority = CPL_PRIORITY_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		cxgb3_ofld_send(ppm->lldev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 				unsigned int tid, int pg_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	struct cpl_set_tcb_field *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		"csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
	/* set the DDP page-size index in TCB word 31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	req = (struct cpl_set_tcb_field *)skb->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	req->reply = V_NO_REPLY(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	req->cpu_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	req->word = htons(31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	req->mask = cpu_to_be64(0xF0000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	req->val = cpu_to_be64(val << 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	skb->priority = CPL_PRIORITY_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	cxgb3_ofld_send(csk->cdev->lldev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /**
 * ddp_setup_conn_digest - set up connection digest settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)  * @csk: cxgb tcp socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)  * @tid: connection id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  * @hcrc: header digest enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * @dcrc: data digest enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  * set up the iscsi digest settings for a connection identified by tid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 				 int hcrc, int dcrc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	struct cpl_set_tcb_field *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		"csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
	/* set the header/data digest bits of the ULP submode in TCB word 31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	req = (struct cpl_set_tcb_field *)skb->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	req->reply = V_NO_REPLY(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	req->cpu_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	req->word = htons(31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	req->mask = cpu_to_be64(0x0F000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	req->val = cpu_to_be64(val << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	skb->priority = CPL_PRIORITY_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	cxgb3_ofld_send(csk->cdev->lldev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
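
/*
 * The two functions above differ only in which bits of TCB word 31 they
 * touch.  A hypothetical helper factoring out the shared CPL_SET_TCB_FIELD
 * setup might look like this (a sketch, not used by the driver):
 */
#if 0
static void set_tcb_field_shape(struct sk_buff *skb, unsigned int tid,
				u16 word, u64 mask, u64 val)
{
	struct cpl_set_tcb_field *req = (struct cpl_set_tcb_field *)skb->head;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(1);	/* no completion message needed */
	req->cpu_idx = 0;
	req->word = htons(word);	/* word 31 for both callers above */
	req->mask = cpu_to_be64(mask);	/* 0xF0000000 page idx, 0x0F000000 digests */
	req->val = cpu_to_be64(val);
	skb->priority = CPL_PRIORITY_CONTROL;
}
#endif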
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)  * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)  * @cdev: cxgb3i adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)  * initialize the ddp pagepod manager for a given adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	struct net_device *ndev = cdev->ports[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	struct cxgbi_tag_format tformat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	unsigned int ppmax, tagmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	struct ulp_iscsi_info uinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		pr_err("%s, failed to get iscsi param %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		       ndev->name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	if (uinfo.llimit >= uinfo.ulimit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		pr_warn("T3 %s, iscsi NOT enabled %u ~ %u!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 			ndev->name, uinfo.llimit, uinfo.ulimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	tagmask = cxgbi_tagmask_set(ppmax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	pr_info("T3 %s: 0x%x~0x%x, 0x%x, tagmask 0x%x -> 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		ndev->name, uinfo.llimit, uinfo.ulimit, ppmax, uinfo.tagmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		tagmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	for (i = 0; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		tformat.pgsz_order[i] = uinfo.pgsz_factor[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	cxgbi_tagmask_check(tagmask, &tformat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	err = cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 				  (uinfo.ulimit - uinfo.llimit + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 				  uinfo.llimit, uinfo.llimit, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		uinfo.tagmask = tagmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			pr_err("T3 %s fail to set iscsi param %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 			       ndev->name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 			cdev->flags |= CXGBI_FLAG_DDP_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	cdev->csk_ddp_set_map = ddp_set_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	cdev->csk_ddp_clear_map = ddp_clear_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	cdev->cdev2ppm = cdev2ppm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 				  uinfo.max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 				  uinfo.max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
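
/*
 * Worked example of the sizing above, with hypothetical firmware limits:
 * for llimit = 0 and ulimit = 0xFFFFFF (a 16 MB ULP region) and 64-byte
 * pagepods, ppmax = 0x1000000 >> 6 = 262144, so cxgbi_tagmask_set() must
 * set aside at least 18 tag bits (2^18 == 262144) for the pagepod index.
 */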
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static void cxgb3i_dev_close(struct t3cdev *t3dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	cxgbi_device_unregister(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)  * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)  * @t3dev: t3cdev adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static void cxgb3i_dev_open(struct t3cdev *t3dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	struct adapter *adapter = tdev2adap(t3dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	if (cdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		pr_info("0x%p, updating.\n", cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	cdev = cxgbi_device_register(0, adapter->params.nports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (!cdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		pr_warn("device 0x%p register failed.\n", t3dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	cdev->lldev = t3dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	cdev->pdev = adapter->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	cdev->ports = adapter->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	cdev->nports = adapter->params.nports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	cdev->mtus = adapter->params.mtus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	cdev->nmtus = NMTUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	cdev->itp = &cxgb3i_iscsi_transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	err = cxgb3i_ddp_init(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		pr_info("0x%p ddp init failed %d\n", cdev, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	err = cxgb3i_ofld_init(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		pr_info("0x%p offload init failed\n", cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 				&cxgb3i_host_template, cxgb3i_stt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	for (i = 0; i < cdev->nports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		cdev->hbas[i]->ipv4addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			cxgb3i_get_private_ipv4addr(cdev->ports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		cdev, cdev ? cdev->flags : 0, t3dev, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	cxgbi_device_unregister(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	log_debug(1 << CXGBI_DBG_TOE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		"0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		t3dev, cdev, event, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	if (!cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	case OFFLOAD_STATUS_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	case OFFLOAD_STATUS_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  * cxgb3i_init_module - module init entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  *
 * Initialize any driver-wide global data structures, register the iSCSI
 *	transport, and register this driver with the cxgb3 module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static int __init cxgb3i_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	printk(KERN_INFO "%s", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	cxgb3_register_client(&t3_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  * cxgb3i_exit_module - module cleanup/exit entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  *
 * Go through the driver's hba list, release any resources held by each hba,
 *	then unregister the iSCSI transport and unregister from the cxgb3
 *	module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static void __exit cxgb3i_exit_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	cxgb3_unregister_client(&t3_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) module_init(cxgb3i_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) module_exit(cxgb3i_exit_module);