^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * QLogic FCoE Offload Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (c) 2016-2018 Cavium Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #ifndef _QEDFC_H_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #define _QEDFC_H_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <scsi/libfcoe.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <scsi/libfc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <scsi/fc/fc_fip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <scsi/fc/fc_fc2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <scsi/scsi_tcq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <scsi/fc_encode.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/version.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) /* qedf_hsi.h needs to before included any qed includes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "qedf_hsi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/qed/qed_if.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/qed/qed_fcoe_if.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/qed/qed_ll2_if.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "qedf_version.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "qedf_dbg.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "drv_fcoe_fw_funcs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) /* Helpers to extract upper and lower 32-bits of pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define U64_HI(val) ((u32)(((u64)(val)) >> 32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define QEDF_DESCR "QLogic FCoE Offload Driver"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define QEDF_MODULE_NAME "qedf"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define QEDF_FLOGI_RETRY_CNT 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define QEDF_RPORT_RETRY_CNT 255
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define QEDF_MAX_SESSIONS 1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define QEDF_MAX_PAYLOAD 2048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define QEDF_MAX_BDS_PER_CMD 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define QEDF_MAX_BD_LEN 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define QEDF_BD_SPLIT_SZ 0x1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define QEDF_PAGE_SIZE 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define QED_HW_DMA_BOUNDARY 0xfff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define QEDF_MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define QEDF_MFS (QEDF_MAX_PAYLOAD + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) sizeof(struct fc_frame_header))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define QEDF_MAX_NPIV 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define QEDF_TM_TIMEOUT 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define QEDF_ABORT_TIMEOUT (10 * 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define QEDF_CLEANUP_TIMEOUT 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define QEDF_MAX_CDB_LEN 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define QEDF_LL2_BUF_SIZE 2500 /* Buffer size required for LL2 Rx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define UPSTREAM_REMOVE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define UPSTREAM_KEEP 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) struct qedf_mp_req {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) uint32_t req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) void *req_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) dma_addr_t req_buf_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) struct scsi_sge *mp_req_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) dma_addr_t mp_req_bd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) struct fc_frame_header req_fc_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) uint32_t resp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) void *resp_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) dma_addr_t resp_buf_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) struct scsi_sge *mp_resp_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) dma_addr_t mp_resp_bd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) struct fc_frame_header resp_fc_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) struct qedf_els_cb_arg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) struct qedf_ioreq *aborted_io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) struct qedf_ioreq *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) u8 op; /* Used to keep track of ELS op */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) uint16_t l2_oxid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) u32 offset; /* Used for sequence cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) u8 r_ctl; /* Used for sequence cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) enum qedf_ioreq_event {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) QEDF_IOREQ_EV_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) QEDF_IOREQ_EV_ABORT_SUCCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) QEDF_IOREQ_EV_ABORT_FAILED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) QEDF_IOREQ_EV_SEND_RRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) QEDF_IOREQ_EV_ELS_TMO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) QEDF_IOREQ_EV_ELS_ERR_DETECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) QEDF_IOREQ_EV_ELS_FLUSH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) QEDF_IOREQ_EV_CLEANUP_SUCCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) QEDF_IOREQ_EV_CLEANUP_FAILED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define FC_GOOD 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct qedf_ioreq {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) struct list_head link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) uint16_t xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) struct scsi_cmnd *sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define QEDF_SCSI_CMD 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define QEDF_TASK_MGMT_CMD 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define QEDF_ABTS 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define QEDF_ELS 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define QEDF_CLEANUP 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define QEDF_SEQ_CLEANUP 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) u8 cmd_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define QEDF_CMD_OUTSTANDING 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define QEDF_CMD_IN_ABORT 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define QEDF_CMD_IN_CLEANUP 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define QEDF_CMD_SRR_SENT 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define QEDF_CMD_DIRTY 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define QEDF_CMD_ERR_SCSI_DONE 0x5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) u8 io_req_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) uint8_t tm_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) struct qedf_rport *fcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define QEDF_CMD_ST_INACTIVE 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define QEDFC_CMD_ST_IO_ACTIVE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define QEDFC_CMD_ST_ABORT_ACTIVE 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #define QEDFC_CMD_ST_ABORT_ACTIVE_EH 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define QEDFC_CMD_ST_CLEANUP_ACTIVE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) #define QEDFC_CMD_ST_CLEANUP_ACTIVE_EH 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define QEDFC_CMD_ST_RRQ_ACTIVE 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define QEDFC_CMD_ST_RRQ_WAIT 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #define QEDFC_CMD_ST_OXID_RETIRE_WAIT 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define QEDFC_CMD_ST_TMF_ACTIVE 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #define QEDFC_CMD_ST_DRAIN_ACTIVE 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define QEDFC_CMD_ST_CLEANED 11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define QEDFC_CMD_ST_ELS_ACTIVE 12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) atomic_t state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) enum qedf_ioreq_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) size_t data_xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) /* ID: 001: Alloc cmd (qedf_alloc_cmd) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) /* ID: 002: Initiate ABTS (qedf_initiate_abts) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) /* ID: 003: For RRQ (qedf_process_abts_compl) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) struct kref refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) struct qedf_cmd_mgr *cmd_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) struct io_bdt *bd_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) struct delayed_work timeout_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) struct completion tm_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) struct completion abts_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) struct completion cleanup_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) struct e4_fcoe_task_context *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) struct fcoe_task_params *task_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) struct scsi_sgl_task_params *sgl_task_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) int lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * Need to allocate enough room for both sense data and FCP response data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) * which has a max length of 8 bytes according to spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) #define QEDF_SCSI_SENSE_BUFFERSIZE (SCSI_SENSE_BUFFERSIZE + 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) uint8_t *sense_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) dma_addr_t sense_buffer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) u32 fcp_resid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) u32 fcp_rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) u32 fcp_sns_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) u8 cdb_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) u8 fcp_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) u8 fcp_rsp_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) u8 scsi_comp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #define QEDF_MAX_REUSE 0xfff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) u16 reuse_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) struct qedf_mp_req mp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) void (*cb_func)(struct qedf_els_cb_arg *cb_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) struct qedf_els_cb_arg *cb_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) int fp_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) unsigned int int_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) #define QEDF_IOREQ_UNKNOWN_SGE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) #define QEDF_IOREQ_SLOW_SGE 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) #define QEDF_IOREQ_FAST_SGE 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) u8 sge_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) struct delayed_work rrq_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) /* Used for sequence level recovery; i.e. REC/SRR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) uint32_t rx_buf_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) uint32_t tx_buf_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) uint32_t rx_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) uint32_t task_retry_identifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * Used to tell if we need to return a SCSI command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * during some form of error processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) bool return_scsi_cmd_on_abts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) unsigned int alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) extern struct workqueue_struct *qedf_io_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) struct qedf_rport {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) spinlock_t rport_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) #define QEDF_RPORT_SESSION_READY 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) #define QEDF_RPORT_UPLOADING_CONNECTION 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) #define QEDF_RPORT_IN_RESET 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) #define QEDF_RPORT_IN_LUN_RESET 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) #define QEDF_RPORT_IN_TARGET_RESET 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) int lun_reset_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) unsigned long retry_delay_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) struct fc_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) struct fc_rport_priv *rdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) u32 handle; /* Handle from qed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) u32 fw_cid; /* fw_cid from qed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) void __iomem *p_doorbell;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) /* Send queue management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) atomic_t free_sqes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) atomic_t ios_to_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) atomic_t num_active_ios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) struct fcoe_wqe *sq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) dma_addr_t sq_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) u16 sq_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) u16 fw_sq_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) u16 sq_con_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) u32 sq_mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) void *sq_pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) dma_addr_t sq_pbl_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) u32 sq_pbl_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) u32 sid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) #define QEDF_RPORT_TYPE_DISK 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) #define QEDF_RPORT_TYPE_TAPE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) uint dev_type; /* Disk or tape */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) struct list_head peers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) /* Used to contain LL2 skb's in ll2_skb_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) struct qedf_skb_work {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) struct qedf_fastpath {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) #define QEDF_SB_ID_NULL 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) u16 sb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) struct qed_sb_info *sb_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) /* Keep track of number of completions on this fastpath */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) unsigned long completions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) uint32_t cq_num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) /* Used to pass fastpath information needed to process CQEs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) struct qedf_io_work {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) struct fcoe_cqe cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) struct fc_frame *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) struct qedf_glbl_q_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) u64 hw_p_cq; /* Completion queue PBL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) u64 hw_p_rq; /* Request queue PBL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) u64 hw_p_cmdq; /* Command queue PBL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) struct global_queue {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) struct fcoe_cqe *cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) dma_addr_t cq_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) u32 cq_mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) u32 cq_cons_idx; /* Completion queue consumer index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) u32 cq_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) void *cq_pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) dma_addr_t cq_pbl_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) u32 cq_pbl_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) /* I/O tracing entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) #define QEDF_IO_TRACE_SIZE 2048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) struct qedf_io_log {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) #define QEDF_IO_TRACE_REQ 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) #define QEDF_IO_TRACE_RSP 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) uint8_t direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) uint16_t task_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) uint32_t port_id; /* Remote port fabric ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) int lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) unsigned char op; /* SCSI CDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) uint8_t lba[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) unsigned int bufflen; /* SCSI buffer length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) unsigned int sg_count; /* Number of SG elements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) int result; /* Result passed back to mid-layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) unsigned long jiffies; /* Time stamp when I/O logged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) int refcount; /* Reference count for task id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) unsigned int req_cpu; /* CPU that the task is queued on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) unsigned int int_cpu; /* Interrupt CPU that the task is received on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) unsigned int rsp_cpu; /* CPU that task is returned on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) u8 sge_type; /* Did we take the slow, single or fast SGE path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) /* Number of entries in BDQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) #define QEDF_BDQ_SIZE 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) #define QEDF_BDQ_BUF_SIZE 2072
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) /* DMA coherent buffers for BDQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) struct qedf_bdq_buf {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) void *buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) dma_addr_t buf_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) /* Main adapter struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) struct qedf_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) struct qedf_dbg_ctx dbg_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) struct fcoe_ctlr ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) struct fc_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) u8 data_src_addr[ETH_ALEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) #define QEDF_LINK_DOWN 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) #define QEDF_LINK_UP 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) atomic_t link_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) #define QEDF_DCBX_PENDING 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) #define QEDF_DCBX_DONE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) atomic_t dcbx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) #define QEDF_NULL_VLAN_ID -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) #define QEDF_FALLBACK_VLAN 1002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) #define QEDF_DEFAULT_PRIO 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) int vlan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) u8 prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) struct qed_dev *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) struct qed_dev_fcoe_info dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) struct qed_int_info int_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) uint16_t last_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) spinlock_t hba_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) u64 wwnn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) u64 wwpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) u8 __aligned(16) mac[ETH_ALEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) struct list_head fcports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) atomic_t num_offloads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) unsigned int curr_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) struct workqueue_struct *ll2_recv_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) struct workqueue_struct *link_update_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) struct delayed_work link_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) struct delayed_work link_recovery;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) struct completion flogi_compl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) struct completion fipvlan_compl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) * Used to tell if we're in the window where we are waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) * the link to come back up before informting fcoe that the link is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) atomic_t link_down_tmo_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) #define QEDF_TIMER_INTERVAL (1 * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) struct timer_list timer; /* One second book keeping timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) #define QEDF_DRAIN_ACTIVE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) #define QEDF_LL2_STARTED 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) #define QEDF_UNLOADING 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) #define QEDF_GRCDUMP_CAPTURE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) #define QEDF_IN_RECOVERY 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) #define QEDF_DBG_STOP_IO 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) #define QEDF_PROBING 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) unsigned long flags; /* Miscellaneous state flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) int fipvlan_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) u8 num_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) struct global_queue **global_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) /* Pointer to array of queue structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) struct qedf_glbl_q_params *p_cpuq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) /* Physical address of array of queue structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) dma_addr_t hw_p_cpuq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) void *bdq_pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) dma_addr_t bdq_pbl_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) size_t bdq_pbl_mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) void *bdq_pbl_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) dma_addr_t bdq_pbl_list_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) u8 bdq_pbl_list_num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) void __iomem *bdq_primary_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) void __iomem *bdq_secondary_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) uint16_t bdq_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) /* Structure for holding all the fastpath for this qedf_ctx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) struct qedf_fastpath *fp_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) struct qed_fcoe_tid tasks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) struct qedf_cmd_mgr *cmd_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) /* Holds the PF parameters we pass to qed to start he FCoE function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) struct qed_pf_params pf_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) /* Used to time middle path ELS and TM commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) struct workqueue_struct *timer_work_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) #define QEDF_IO_WORK_MIN 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) mempool_t *io_mempool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) struct workqueue_struct *dpc_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) struct delayed_work recovery_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) struct delayed_work board_disable_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) struct delayed_work grcdump_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) struct delayed_work stag_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) u32 slow_sge_ios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) u32 fast_sge_ios;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) uint8_t *grcdump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) uint32_t grcdump_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) spinlock_t io_trace_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) uint16_t io_trace_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) bool stop_io_on_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) u32 flogi_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) u32 flogi_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) u32 flogi_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) /* Used for fc statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) struct mutex stats_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) u64 input_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) u64 output_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) u64 control_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) u64 packet_aborts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) u64 alloc_failures;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) u8 lun_resets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) u8 target_resets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) u8 task_set_fulls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) u8 busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) /* Used for flush routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) struct mutex flush_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) struct io_bdt {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) struct qedf_ioreq *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) struct scsi_sge *bd_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) dma_addr_t bd_tbl_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) u16 bd_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) struct qedf_cmd_mgr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) struct qedf_ctx *qedf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) struct io_bdt **io_bdt_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) #define FCOE_PARAMS_NUM_TASKS 2048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) atomic_t free_list_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) /* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) * Usage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) * void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) * ptr = qedf_get_task_mem(&qedf->tasks, 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) return (void *)(info->blocks[tid / info->num_tids_per_block] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) (tid % info->num_tids_per_block) * info->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) * Externs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * (QEDF_LOG_NPIV | QEDF_LOG_SESS | QEDF_LOG_LPORT | QEDF_LOG_ELS | QEDF_LOG_MQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * | QEDF_LOG_IO | QEDF_LOG_UNSOL | QEDF_LOG_SCSI_TM | QEDF_LOG_MP_REQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * QEDF_LOG_EVT | QEDF_LOG_CONN | QEDF_LOG_DISC | QEDF_LOG_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) #define QEDF_DEFAULT_LOG_MASK 0x3CFB6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) extern const struct qed_fcoe_ops *qed_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) extern uint qedf_dump_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) extern uint qedf_io_tracing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) extern uint qedf_stop_io_on_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) extern uint qedf_link_down_tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) #define QEDF_RETRY_DELAY_MAX 600 /* 60 seconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) extern bool qedf_retry_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) extern uint qedf_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) extern int qedf_queuecommand(struct Scsi_Host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) struct scsi_cmnd *sc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) extern u8 *qedf_get_src_mac(struct fc_lport *lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) struct qedf_ioreq *io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) extern void qedf_process_warning_compl(struct qedf_ctx *qedf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) extern void qedf_process_error_detect(struct qedf_ctx *qedf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) extern void qedf_release_cmd(struct kref *ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) bool return_scsi_cmd_on_abts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) struct qedf_ioreq *io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) u8 cmd_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) extern struct device_attribute *qedf_host_attrs[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) unsigned int timer_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) extern void qedf_ring_doorbell(struct qedf_rport *fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) struct qedf_ioreq *els_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) bool return_scsi_cmd_on_abts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct qedf_ioreq *io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) int result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) bool qedf_wait_for_upload(struct qedf_ctx *qedf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) struct fcoe_cqe *cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) extern void qedf_restart_rport(struct qedf_rport *fcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) extern int qedf_send_rec(struct qedf_ioreq *orig_io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) extern int qedf_post_io_req(struct qedf_rport *fcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) struct qedf_ioreq *io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) extern int qedf_send_flogi(struct qedf_ctx *qedf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) extern void qedf_get_protocol_tlv_data(void *dev, void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) extern void qedf_fp_io_handler(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) extern void qedf_wq_grcdump(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) void qedf_stag_change_work(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) void qedf_ctx_soft_reset(struct fc_lport *lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) extern void qedf_board_disable_work(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) extern void qedf_schedule_hw_err_handler(void *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) enum qed_hw_err_type err_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) #define FCOE_WORD_TO_BYTE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) #define QEDF_MAX_TASK_NUM 0xFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) #define QL45xxx 0x165C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) #define QL41xxx 0x8080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) #define MAX_CT_PAYLOAD 2048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) #define DISCOVERED_PORTS 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) #define NUMBER_OF_PORTS 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct fip_vlan {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) struct ethhdr eth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) struct fip_header fip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) struct fip_mac_desc mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) struct fip_wwn_desc wwnn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) } desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) /* SQ/CQ Sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) #define GBL_RSVD_TASKS 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) #define NUM_TASKS_PER_CONNECTION 1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) #define NUM_RW_TASKS_PER_CONNECTION 512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) #define FCOE_PARAMS_CQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) #define FCOE_PARAMS_CMDQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) #define SQ_NUM_ENTRIES NUM_TASKS_PER_CONNECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) #define QEDF_FCOE_PARAMS_GL_RQ_PI 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) #define QEDF_FCOE_PARAMS_GL_CMD_PI 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) #define QEDF_READ (1 << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) #define QEDF_WRITE (1 << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) #define MAX_FIBRE_LUNS 0xffffffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) #define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) num_online_cpus())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * PCI function probe defines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) /* Probe/remove called during normal PCI probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) #define QEDF_MODE_NORMAL 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) /* Probe/remove called from qed error recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) #define QEDF_MODE_RECOVERY 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) #define SUPPORTED_25000baseKR_Full (1<<27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) #define SUPPORTED_50000baseKR2_Full (1<<28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) #define SUPPORTED_100000baseKR4_Full (1<<29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) #define SUPPORTED_100000baseCR4_Full (1<<30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) #endif