/* bnx2fc_hwi.c: QLogic Linux FCoE offload driver.
 * This file contains the low-level functions that interact
 * with the 57712 FCoE firmware.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

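/* Per-CPU CQ processing context; the definition lives in bnx2fc_fcoe.c */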
DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					 struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
				     struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
					    struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					     struct fcoe_kcqe *destroy_kcqe);

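/**
 * bnx2fc_send_stat_req - sends the STAT KWQE to request firmware statistics
 *
 * @hba: adapter structure pointer
 *
 * The firmware DMAs the statistics into the buffer at hba->stats_buf_dma.
 */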
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba: adapter structure pointer
 *
 * Send down the FCoE firmware init KWQEs that initiate the initial
 * handshake with the f/w.
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* fill init1 KWQE */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init1.num_tasks = hba->max_tasks;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
	fcoe_init1.task_list_pbl_addr_hi =
				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

	fcoe_init1.flags = (PAGE_SHIFT <<
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

	/* fill init2 KWQE */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;

	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
					   ((u64) hba->hash_tbl_pbl_dma >> 32);

	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
					  ((u64) hba->t2_hash_tbl_dma >> 32);

	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_ptr_dma >> 32);

	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* fill init3 KWQE */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;

	/*
	 * enable both cached connection and cached tasks
	 * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
	 */
	fcoe_init3.perf_config = 3;

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
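
/**
 * bnx2fc_send_fw_fcoe_destroy_msg - sends the FCoE firmware destroy KWQE
 *
 * @hba: adapter structure pointer
 *
 * Tears down the firmware state set up by bnx2fc_send_fw_fcoe_init_msg().
 */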
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = -1;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
				 struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;

	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
				(u32)((u64) tgt->rq_dma >> 32);

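	/*
	 * Initial RQ producer value expected by the firmware; the high bit
	 * of this magic constant is presumably a firmware toggle/arm bit
	 * (assumption - the constant is not named anywhere in the driver).
	 */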
	ofld_req1.rq_prod = 0x8000;

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will be
	 * used during disable/destroy processing on link-down, since the
	 * port_id is reset to 0 when the lport is reset.
	 */
	tgt->sid = port_id;
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = (u32) tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/
	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/*
	 * Info from PRLI response, this info is used for sequence level error
	 * recovery support
	 */
	if (tgt->dev_type == TYPE_TAPE) {
		ofld_req3.flags |= 1 <<
				    FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
		ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
				    ? 1 : 0) <<
				   FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
	}

	/* vlan flag */
	ofld_req3.flags |= (interface->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */

	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

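	/*
	 * lport->e_d_tov is in milliseconds; the firmware timer field
	 * appears to use 20ms ticks (assumption inferred from the divisor).
	 */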
	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;

	/* local mac */
	ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
	ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
	/* fcf mac */
	ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
					(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_enable_req(struct fcoe_port *port,
				   struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	/* local mac */
	enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
	enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
	enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

	enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, "
		       "sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = interface->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
	disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
	disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
	disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
	disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
	disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];

	disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = interface->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba: adapter structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
				    struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

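/*
 * is_valid_lport - check whether @lport is still registered on @hba
 *
 * Walks hba->vports under hba_lock; used to avoid handing frames to an
 * lport that has already been destroyed.
 */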
static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
{
	struct bnx2fc_lport *blport;

	spin_lock_bh(&hba->hba_lock);
	list_for_each_entry(blport, &hba->vports, list) {
		if (blport->lport == lport) {
			spin_unlock_bh(&hba->hba_lock);
			return true;
		}
	}
	spin_unlock_bh(&hba->hba_lock);
	return false;
}

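/*
 * Deferred-work handler that delivers an unsolicited frame to libfc via
 * fc_exch_recv(), provided the destination lport is still valid.
 */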
static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct bnx2fc_hba *hba;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	hba = unsol_els->hba;
	if (is_valid_lport(hba, lport))
		fc_exch_recv(lport, fp);
	kfree(unsol_els);
}

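/**
 * bnx2fc_process_l2_frame_compl - process an unsolicited L2 frame
 *
 * @tgt: bnx2fc_rport structure pointer
 * @buf: buffer holding the FC frame (header plus payload)
 * @frame_len: total frame length in bytes
 * @l2_oxid: OX_ID to patch into the frame header, or FC_XID_UNKNOWN
 *
 * Wraps the raw frame in an fc_frame and queues bnx2fc_unsol_els_work()
 * to hand ELS requests/replies up to libfc.
 */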
void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct sk_buff *skb;
	u32 payload_len;
	u32 crc;
	u8 op;

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	skb = fp_skb(fp);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) || (op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				kfree_skb(skb);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->hba = interface->hba;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		kfree_skb(skb);
		kfree(unsol_els);
	}
}

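/*
 * Handle an unsolicited CQE: either a received L2 frame or an
 * error/warning report from the firmware.
 */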
static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	u16 xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int rc = 0;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

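		/* Number of RQ buffers spanned by the frame, rounded up */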
		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

		spin_lock_bh(&tgt->tgt_lock);
		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);

		if (rq_data) {
			buf = rq_data;
		} else {
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					      GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				spin_lock_bh(&tgt->tgt_lock);
				rq_data = (unsigned char *)
					   bnx2fc_get_next_rqe(tgt, 1);
				spin_unlock_bh(&tgt->tgt_lock);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		spin_lock_bh(&tgt->tgt_lock);
		bnx2fc_return_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = err_entry->fc_hdr.ox_id;
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > hba->max_xid) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				   xid);
			goto ret_err_rqe;
		}

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_err_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_err_rqe;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
					    "progress.. ignore unsol err\n");
			goto ret_err_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
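		/* Report the lowest set bit in the 64-bit error bitmap */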
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (err_warn_bit_map & (u64)((u64)1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) err_warn = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * If ABTS is already in progress, and FW error is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * received after that, do not cancel the timeout_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * and let the error recovery continue by explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * logging out the target, when the ABTS eventually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * times out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) "in ABTS processing\n", xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) goto ret_err_rqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (tgt->dev_type != TYPE_TAPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) goto skip_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) switch (err_warn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) case FCOE_ERROR_CODE_DATA_OOO_RO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) memcpy(&io_req->err_entry, err_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) sizeof(struct fcoe_err_report_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (!test_bit(BNX2FC_FLAG_SRR_SENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) &io_req->req_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) spin_unlock_bh(&tgt->tgt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) rc = bnx2fc_send_rec(io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) spin_lock_bh(&tgt->tgt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) goto skip_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) printk(KERN_ERR PFX "SRR in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) goto ret_err_rqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) skip_rec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * Cancel the timeout_work, as we received IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * completion with FW error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (cancel_delayed_work(&io_req->timeout_work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) kref_put(&io_req->refcount, bnx2fc_cmd_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) rc = bnx2fc_initiate_abts(io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (rc != SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) printk(KERN_ERR PFX "err_warn: initiate_abts "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) "failed xid = 0x%x. issue cleanup\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) bnx2fc_initiate_cleanup(io_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) ret_err_rqe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) bnx2fc_return_rqe(tgt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) spin_unlock_bh(&tgt->tgt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) case FCOE_WARNING_DETECTION_CQE_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * In case of a warning-reporting CQE, a single RQ entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * is consumed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) spin_lock_bh(&tgt->tgt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) num_rq = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) err_entry = (struct fcoe_err_report_entry *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) bnx2fc_get_next_rqe(tgt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) err_entry->data.err_warn_bitmap_hi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) err_entry->data.err_warn_bitmap_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (xid > hba->max_xid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) goto ret_warn_rqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) err_warn_bit_map =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) err_entry->data.err_warn_bitmap_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (err_warn_bit_map & ((u64)1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) err_warn = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (!io_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto ret_warn_rqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) goto ret_warn_rqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) memcpy(&io_req->err_entry, err_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) sizeof(struct fcoe_err_report_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /* REC_TOV is not a warning code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) ret_warn_rqe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) bnx2fc_return_rqe(tgt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) spin_unlock_bh(&tgt->tgt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
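
/*
 * Illustrative sketch, not part of the driver: how the unsolicited
 * error/warning path above assembles the 64-bit firmware bitmap from
 * its two 32-bit halves and picks the lowest set bit as the reported
 * code. Plain C so it can be built and run stand-alone; it is compiled
 * out of this file via #if 0, and NUM_ERR_BITS_EXAMPLE is an assumed
 * stand-in for BNX2FC_NUM_ERR_BITS.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define NUM_ERR_BITS_EXAMPLE 62	/* assumption, not the driver's value */

static int first_err_bit(uint32_t bitmap_hi, uint32_t bitmap_lo)
{
	uint64_t bit_map = ((uint64_t)bitmap_hi << 32) | bitmap_lo;
	int i;

	for (i = 0; i < NUM_ERR_BITS_EXAMPLE; i++)
		if (bit_map & ((uint64_t)1 << i))
			return i;	/* lowest set bit = reported code */
	return -1;			/* nothing reported */
}

int main(void)
{
	/* bit 33 set in the high half -> code 33 */
	printf("err_warn = %d\n", first_err_bit(0x2, 0x0));
	return 0;
}
#endif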
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) unsigned char *rq_data, u8 num_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct fcoe_task_ctx_entry *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct fcoe_port *port = tgt->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct bnx2fc_interface *interface = port->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct bnx2fc_hba *hba = interface->hba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct bnx2fc_cmd *io_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) u16 xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u8 cmd_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) u8 rx_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) spin_lock_bh(&tgt->tgt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (io_req == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) spin_unlock_bh(&tgt->tgt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* Cache the command type; the completion is dispatched on it below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) cmd_type = io_req->cmd_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* Dispatch the IO completion based on the command type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) switch (cmd_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) case BNX2FC_SCSI_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) rq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) spin_unlock_bh(&tgt->tgt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) bnx2fc_process_abts_compl(io_req, task, num_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) else if (rx_state ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) bnx2fc_process_cleanup_compl(io_req, task, num_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) printk(KERN_ERR PFX "Invalid rx state - %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) rx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) case BNX2FC_TASK_MGMT_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) case BNX2FC_ABTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * ABTS request received by firmware. ABTS response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * will be delivered to the task belonging to the IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * that was aborted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) kref_put(&io_req->refcount, bnx2fc_cmd_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) case BNX2FC_ELS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) bnx2fc_process_els_compl(io_req, task, num_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) bnx2fc_process_abts_compl(io_req, task, num_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) else if (rx_state ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) bnx2fc_process_cleanup_compl(io_req, task, num_rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) printk(KERN_ERR PFX "Invalid rx state = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) rx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) case BNX2FC_CLEANUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) kref_put(&io_req->refcount, bnx2fc_cmd_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) case BNX2FC_SEQ_CLEANUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) kref_put(&io_req->refcount, bnx2fc_cmd_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) spin_unlock_bh(&tgt->tgt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
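
/*
 * Illustrative sketch, not part of the driver: the mask-and-shift
 * extraction of rx_state used above. The mask and shift values here
 * are assumptions standing in for FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE
 * and its _SHIFT; compiled out via #if 0.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define RX_STATE_MASK_EXAMPLE	0x00ff0000u	/* assumed field position */
#define RX_STATE_SHIFT_EXAMPLE	16

static uint8_t rx_state_of(uint32_t rx_flags)
{
	return (uint8_t)((rx_flags & RX_STATE_MASK_EXAMPLE) >>
			 RX_STATE_SHIFT_EXAMPLE);
}

int main(void)
{
	printf("rx_state = 0x%x\n", rx_state_of(0x00470000));	/* 0x47 */
	return 0;
}
#endif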
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) u32 msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) FCOE_CQE_TOGGLE_BIT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) msg = *((u32 *)rx_db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) writel(cpu_to_le32(msg), tgt->ctx_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
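
/*
 * Illustrative sketch, not part of the driver: bnx2fc_arm_cq() above
 * packs the consumer index and the expected toggle bit into one 32-bit
 * value and posts it with a single MMIO write. The shift below is an
 * assumed stand-in for FCOE_CQE_TOGGLE_BIT_SHIFT; compiled out via
 * #if 0.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define TOGGLE_BIT_SHIFT_EXAMPLE 16	/* assumption, not the HSI value */

static uint32_t pack_cq_doorbell(uint16_t cons_idx, unsigned int toggle)
{
	/* consumer index in the low bits, toggle bit above it */
	return (uint32_t)cons_idx |
	       ((uint32_t)(toggle & 1) << TOGGLE_BIT_SHIFT_EXAMPLE);
}

int main(void)
{
	printf("doorbell = 0x%08x\n", pack_cq_doorbell(42, 1));
	return 0;
}
#endif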
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) unsigned char *rq_data, u8 num_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct fcoe_task_ctx_entry *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct bnx2fc_work *work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (!work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) INIT_LIST_HEAD(&work->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) work->tgt = tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) work->wqe = wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) work->num_rq = num_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) work->task = task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (rq_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /* Pending work request completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) unsigned int cpu = wqe % num_possible_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct bnx2fc_percpu_s *fps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) struct bnx2fc_work *work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct fcoe_task_ctx_entry *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct fcoe_task_ctx_entry *task_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct fcoe_port *port = tgt->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct bnx2fc_interface *interface = port->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct bnx2fc_hba *hba = interface->hba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) unsigned char *rq_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) int task_idx, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) u16 xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) u8 num_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (xid >= hba->max_tasks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) pr_err(PFX "ERROR: xid out of range\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) task_idx = xid / BNX2FC_TASKS_PER_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) index = xid % BNX2FC_TASKS_PER_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) task = &task_page[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (!num_rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) goto num_rq_zero;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) rq_data = bnx2fc_get_next_rqe(tgt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (num_rq > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) /* We do not need extra sense data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) for (i = 1; i < num_rq; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) bnx2fc_get_next_rqe(tgt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (rq_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /* return RQ entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) for (i = 0; i < num_rq; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) bnx2fc_return_rqe(tgt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) num_rq_zero:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) fps = &per_cpu(bnx2fc_percpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) spin_lock_bh(&fps->fp_work_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (fps->iothread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) num_rq, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (work) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) list_add_tail(&work->list, &fps->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) wake_up_process(fps->iothread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) spin_unlock_bh(&fps->fp_work_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) spin_unlock_bh(&fps->fp_work_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) bnx2fc_process_cq_compl(tgt, wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) rq_data_buff, num_rq, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
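
/*
 * Illustrative sketch, not part of the driver: the deferral policy of
 * bnx2fc_pending_work() - hash the completion onto a CPU, queue it to
 * that CPU's thread when one is available, otherwise fall back to
 * processing it inline. All types and the inline_fallback() callback
 * are hypothetical stand-ins; compiled out via #if 0. The queued node
 * would be freed by the worker thread in a real implementation.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct example_work { unsigned int wqe; struct example_work *next; };
struct example_worker { struct example_work *head; bool thread_online; };

static void inline_fallback(unsigned int wqe) { (void)wqe; }

static bool dispatch(struct example_worker *workers, size_t nr_cpus,
		     unsigned int wqe, void (*process_inline)(unsigned int))
{
	struct example_worker *w = &workers[wqe % nr_cpus];
	struct example_work *work;

	if (w->thread_online) {
		work = calloc(1, sizeof(*work));
		if (work) {
			work->wqe = wqe;
			work->next = w->head;	/* hand off to the worker */
			w->head = work;
			return true;
		}
	}
	process_inline(wqe);	/* no thread or no memory: do it here */
	return true;
}

int main(void)
{
	struct example_worker workers[4] = {{ NULL, false }};
	unsigned int i;

	for (i = 0; i < 4; i++)
		workers[i].thread_online = true;
	return dispatch(workers, 4, 42, inline_fallback) ? 0 : 1;
}
#endif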
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct fcoe_cqe *cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) u32 cq_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct fcoe_cqe *cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) u32 num_free_sqes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) u32 num_cqes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) u16 wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * cq_lock is a low contention lock used to protect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * the CQ data structure from being freed up during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * the upload operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) spin_lock_bh(&tgt->cq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (!tgt->cq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) spin_unlock_bh(&tgt->cq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) cq = tgt->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) cq_cons = tgt->cq_cons_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) cqe = &cq[cq_cons];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) (tgt->cq_curr_toggle_bit <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) FCOE_CQE_TOGGLE_BIT_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /* new entry on the cq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (wqe & FCOE_CQE_CQE_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /* Unsolicited event notification */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) bnx2fc_process_unsol_compl(tgt, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (bnx2fc_pending_work(tgt, wqe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) num_free_sqes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) cqe++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) tgt->cq_cons_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) num_cqes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) tgt->cq_cons_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) cqe = cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) tgt->cq_curr_toggle_bit =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 1 - tgt->cq_curr_toggle_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (num_cqes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /* Arm CQ only if doorbell is mapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (tgt->ctx_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) bnx2fc_arm_cq(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) atomic_add(num_free_sqes, &tgt->free_sqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) spin_unlock_bh(&tgt->cq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
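
/*
 * Illustrative sketch, not part of the driver: the toggle-bit scheme
 * bnx2fc_process_new_cqes() uses to spot new CQEs without reading a
 * shared producer index. The producer flips the bit it writes on every
 * pass over the ring; the consumer keeps consuming while an entry's
 * bit matches the pass it expects and flips its expectation when it
 * wraps. Ring size and bit position below are assumptions; compiled
 * out via #if 0.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	8
#define TOGGLE_BIT	0x8000u	/* assumed stand-in for FCOE_CQE_TOGGLE_BIT */

static uint16_t ring[RING_SIZE];
static unsigned int cons_idx, expect_toggle = 1;

static void consume_new_entries(void (*handle)(uint16_t wqe))
{
	uint16_t e;

	while ((((e = ring[cons_idx]) & TOGGLE_BIT) != 0) ==
	       (expect_toggle != 0)) {
		handle(e & (uint16_t)~TOGGLE_BIT);
		if (++cons_idx == RING_SIZE) {
			cons_idx = 0;			/* wrap around ... */
			expect_toggle = !expect_toggle;	/* ... expect flipped bit */
		}
	}
}

static void show(uint16_t wqe) { printf("wqe 0x%x\n", wqe); }

int main(void)
{
	ring[0] = 0x11 | TOGGLE_BIT;	/* written on the producer's 1st pass */
	ring[1] = 0x22 | TOGGLE_BIT;
	consume_new_entries(show);	/* handles 0x11, 0x22, then stops */
	return 0;
}
#endif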
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * bnx2fc_fastpath_notification - process global event queue (KCQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * @hba: adapter structure pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * Fast path event notification handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct fcoe_kcqe *new_cqe_kcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (!tgt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) bnx2fc_process_new_cqes(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * @hba: adapter structure pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * @ofld_kcqe: connection offload kcqe pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * handle session offload completion, enable the session if offload is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * successful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct fcoe_kcqe *ofld_kcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) struct bnx2fc_rport *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct bnx2fc_interface *interface;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) u32 conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) u32 context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) conn_id = ofld_kcqe->fcoe_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) context_id = ofld_kcqe->fcoe_conn_context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) tgt = hba->tgt_ofld_list[conn_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (!tgt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) printk(KERN_ERR PFX "ERROR: ofld_cmpl: No pending ofld req\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ofld_kcqe->fcoe_conn_context_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) interface = tgt->port->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (hba != interface->hba) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) goto ofld_cmpl_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * cnic has allocated a context_id for this session; use this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * while enabling the session.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) tgt->context_id = context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (ofld_kcqe->completion_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (ofld_kcqe->completion_status ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) printk(KERN_ERR PFX "unable to allocate FCoE context "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) "resources\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /* FW offload request successfully completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) ofld_cmpl_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) wake_up_interruptible(&tgt->ofld_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
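
/*
 * Illustrative sketch, not part of the driver: the completion
 * handshake the offload/enable/disable handlers above implement with
 * flag bits and a wait queue - the KCQE handler records the outcome,
 * always sets a "request complete" bit, and wakes the waiter, which
 * then inspects the outcome bits itself. Modeled here with a pthread
 * mutex/condvar as a user-space analog; compiled out via #if 0.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool req_cmpl, offloaded;	/* ~ OFLD_REQ_CMPL / OFFLOADED bits */

static void ofld_cmpl_handler(bool success)
{
	pthread_mutex_lock(&lock);
	offloaded = success;		/* record the outcome */
	req_cmpl = true;		/* signal completion even on failure */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static bool wait_for_ofld(void)
{
	bool ok;

	pthread_mutex_lock(&lock);
	while (!req_cmpl)
		pthread_cond_wait(&cond, &lock);
	ok = offloaded;			/* waiter checks the outcome itself */
	pthread_mutex_unlock(&lock);
	return ok;
}

int main(void)
{
	ofld_cmpl_handler(true);
	printf("offload %s\n", wait_for_ofld() ? "succeeded" : "failed");
	return 0;
}
#endif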
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * @hba: adapter structure pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * @ofld_kcqe: connection offload kcqe pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * handle session enable completion, mark the rport as ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct fcoe_kcqe *ofld_kcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct bnx2fc_rport *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct bnx2fc_interface *interface;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) u32 conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) u32 context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) context_id = ofld_kcqe->fcoe_conn_context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) conn_id = ofld_kcqe->fcoe_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) tgt = hba->tgt_ofld_list[conn_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (!tgt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) ofld_kcqe->fcoe_conn_context_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * context_id should be the same for this target during offload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * and enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (tgt->context_id != context_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) printk(KERN_ERR PFX "context id mis-match\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) interface = tgt->port->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (hba != interface->hba) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) goto enbl_cmpl_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (!ofld_kcqe->completion_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /* enable successful - rport ready for issuing IOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) enbl_cmpl_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) wake_up_interruptible(&tgt->ofld_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) struct fcoe_kcqe *disable_kcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct bnx2fc_rport *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) u32 conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) conn_id = disable_kcqe->fcoe_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) tgt = hba->tgt_ofld_list[conn_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (!tgt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) BNX2FC_TGT_DBG(tgt, "disable_cmpl: conn_id %d\n", conn_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (disable_kcqe->completion_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) disable_kcqe->completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) wake_up_interruptible(&tgt->upld_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) /* disable successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) BNX2FC_TGT_DBG(tgt, "disable successful\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) wake_up_interruptible(&tgt->upld_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct fcoe_kcqe *destroy_kcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct bnx2fc_rport *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) u32 conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) conn_id = destroy_kcqe->fcoe_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) tgt = hba->tgt_ofld_list[conn_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (!tgt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (destroy_kcqe->completion_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) destroy_kcqe->completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* destroy successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) BNX2FC_TGT_DBG(tgt, "upload successful\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) wake_up_interruptible(&tgt->upld_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) switch (err_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) printk(KERN_ERR PFX "init_failure due to NIC error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) case FCOE_KCQE_COMPLETION_STATUS_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) printk(KERN_ERR PFX "init failure due to compl status err\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * bnx2fc_indicate_kcqe - process KCQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * @context: adapter structure pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * @kcq: array of kcqe pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * @num_cqe: Number of completion queue elements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * Generic KCQ event handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) u32 num_cqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) struct fcoe_kcqe *kcqe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) while (i < num_cqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) kcqe = (struct fcoe_kcqe *) kcq[i++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) switch (kcqe->op_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) bnx2fc_fastpath_notification(hba, kcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) bnx2fc_process_ofld_cmpl(hba, kcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) case FCOE_KCQE_OPCODE_ENABLE_CONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) bnx2fc_process_enable_conn_cmpl(hba, kcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) case FCOE_KCQE_OPCODE_INIT_FUNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (kcqe->completion_status !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) bnx2fc_init_failure(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) kcqe->completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) bnx2fc_get_link_state(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) (u8)hba->pcidev->bus->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) case FCOE_KCQE_OPCODE_DESTROY_FUNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (kcqe->completion_status !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) printk(KERN_ERR PFX "DESTROY failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) printk(KERN_INFO PFX "DESTROY success\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) wake_up_interruptible(&hba->destroy_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) case FCOE_KCQE_OPCODE_DISABLE_CONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) bnx2fc_process_conn_disable_cmpl(hba, kcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) case FCOE_KCQE_OPCODE_DESTROY_CONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) case FCOE_KCQE_OPCODE_STAT_FUNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (kcqe->completion_status !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) printk(KERN_ERR PFX "STAT failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) complete(&hba->stat_req_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) case FCOE_KCQE_OPCODE_FCOE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) printk(KERN_ERR PFX "unknown opcode 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) kcqe->op_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct fcoe_sqe *sqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) sqe = &tgt->sq[tgt->sq_prod_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* Fill SQ WQE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /* Advance SQ Prod Idx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) tgt->sq_prod_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
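
/*
 * Illustrative sketch, not part of the driver: the producer side of
 * the toggle-bit convention, mirroring bnx2fc_add_2_sq() - write the
 * task id together with the current toggle bit, then flip the bit
 * whenever the producer index wraps. Shift values are assumptions for
 * FCOE_SQE_TASK_ID_SHIFT / FCOE_SQE_TOGGLE_BIT_SHIFT; compiled out.
 */
#if 0
#include <stdint.h>

#define SQ_SIZE			8
#define TASK_ID_SHIFT_EXAMPLE	0	/* assumption */
#define TOGGLE_SHIFT_EXAMPLE	15	/* assumption */

static uint16_t sq[SQ_SIZE];
static unsigned int prod_idx, toggle = 1;

static void sq_add(uint16_t xid)
{
	sq[prod_idx] = (uint16_t)(xid << TASK_ID_SHIFT_EXAMPLE) |
		       (uint16_t)(toggle << TOGGLE_SHIFT_EXAMPLE);
	if (++prod_idx == SQ_SIZE) {
		prod_idx = 0;		/* wrap around ... */
		toggle = !toggle;	/* ... flip for the next pass */
	}
}

int main(void)
{
	sq_add(0x10);	/* sq[0] = 0x10 with the toggle bit at bit 15 */
	return 0;
}
#endif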
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) u32 msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) sq_db->prod = tgt->sq_prod_idx |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) (tgt->sq_curr_toggle_bit << 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) msg = *((u32 *)sq_db);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) writel(cpu_to_le32(msg), tgt->ctx_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) u32 context_id = tgt->context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) struct fcoe_port *port = tgt->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) u32 reg_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) resource_size_t reg_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct bnx2fc_interface *interface = port->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) struct bnx2fc_hba *hba = interface->hba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) reg_base = pci_resource_start(hba->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) BNX2X_DOORBELL_PCI_BAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) tgt->ctx_base = ioremap(reg_base + reg_off, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (!tgt->ctx_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
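
/*
 * Illustrative sketch, not part of the driver: the doorbell address
 * math in bnx2fc_map_doorbell() above - every context id owns a
 * fixed-size cell in the doorbell BAR, so the register offset is just
 * cell_size * (context_id masked to the id field). The shift and mask
 * below are assumed stand-ins for BNX2X_DB_SHIFT and the 17-bit cid
 * field; compiled out via #if 0.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DB_SHIFT_EXAMPLE	7		/* assumed 128-byte cells */
#define CID_MASK_EXAMPLE	0x1FFFFu	/* 17-bit context id field */

static uint64_t doorbell_offset(uint32_t context_id)
{
	return (uint64_t)(1u << DB_SHIFT_EXAMPLE) *
	       (context_id & CID_MASK_EXAMPLE);
}

int main(void)
{
	printf("cid 5 -> offset 0x%llx\n",
	       (unsigned long long)doorbell_offset(5));
	return 0;
}
#endif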
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) tgt->rq_cons_idx += num_items;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) /* return the rq buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) u32 next_prod_idx = tgt->rq_prod_idx + num_items;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* Wrap around RQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) tgt->rq_prod_idx = next_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) tgt->conn_db->rq_prod = tgt->rq_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
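
/*
 * Illustrative sketch, not part of the driver: the wrap arithmetic of
 * bnx2fc_return_rqe() above. The RQ producer index is a 16-bit
 * sequence number: the low 15 bits address the ring, and when they
 * reach the ring size the index jumps past the unused remainder of
 * the 15-bit space, so bit 15 alternates like an epoch that the chip
 * can compare against its consumer. The ring size is an assumption
 * standing in for BNX2FC_RQ_WQES_MAX; compiled out via #if 0.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define RQ_WQES_MAX_EXAMPLE 16	/* assumption, not the driver's value */

static uint16_t rq_return(uint16_t prod_idx, unsigned int num_items)
{
	uint32_t next = prod_idx + num_items;

	if ((next & 0x7fff) == RQ_WQES_MAX_EXAMPLE)
		next += 0x8000 - RQ_WQES_MAX_EXAMPLE;	/* skip unused tail */
	return (uint16_t)next;
}

int main(void)
{
	uint16_t idx = rq_return(RQ_WQES_MAX_EXAMPLE - 1, 1);

	printf("idx = 0x%04x\n", idx);	/* 0x8000: index 0, epoch flipped */
	return 0;
}
#endif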
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct fcoe_task_ctx_entry *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct bnx2fc_cmd *orig_io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) struct fcoe_ext_mul_sges_ctx *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) u8 orig_task_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) u16 orig_xid = orig_io_req->xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) u32 context_id = tgt->context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) u32 orig_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) int bd_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) orig_task_type = FCOE_TASK_TYPE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) orig_task_type = FCOE_TASK_TYPE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /* Tx flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) task->txwr_rxrd.const_ctx.tx_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) /* init flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) task->txwr_rxrd.const_ctx.init_flags = task_type <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) task->rxwr_txrd.const_ctx.init_flags = context_id <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) bd_count = orig_io_req->bd_tbl->bd_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) /* obtain the appropriate bd entry from relative offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) for (i = 0; i < bd_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (offset < bd[i].buf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) offset -= bd[i].buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) phys_addr += (i * sizeof(struct fcoe_bd_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) (u32)phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) (u32)((u64)phys_addr >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) bd_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) offset; /* adjusted offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /* Multiple SGEs were used for this IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) sgl->mul_sgl.sgl_size = bd_count;
		sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) sgl->mul_sgl.cur_sge_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) memset(&task->rxwr_only.rx_seq_ctx, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) sizeof(struct fcoe_rx_seq_ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
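
/**
 * bnx2fc_init_cleanup_task - initialize an exchange cleanup task
 *
 * @io_req: io request to be cleaned up
 * @task: task context entry to initialize
 * @orig_xid: exchange id of the IO being cleaned up
 */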
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) struct fcoe_task_ctx_entry *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) u16 orig_xid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) struct bnx2fc_rport *tgt = io_req->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) u32 context_id = tgt->context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /* Tx Write Rx Read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /* init flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) task->txwr_rxrd.const_ctx.init_flags = task_type <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (tgt->dev_type == TYPE_TAPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) task->txwr_rxrd.const_ctx.init_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) FCOE_TASK_DEV_TYPE_TAPE <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) task->txwr_rxrd.const_ctx.init_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) FCOE_TASK_DEV_TYPE_DISK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) /* Tx flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) task->txwr_rxrd.const_ctx.tx_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
	/* Rx Write Tx Read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) task->rxwr_txrd.const_ctx.init_flags = context_id <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
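/**
 * bnx2fc_init_mp_task - initialize a middle path (TM/ELS/ABTS) task
 *
 * @io_req: middle path io request
 * @task: task context entry to initialize
 */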
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct fcoe_task_ctx_entry *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct bnx2fc_rport *tgt = io_req->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct fc_frame_header *fc_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct fcoe_ext_mul_sges_ctx *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) u8 task_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) u64 *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) u64 temp_hdr[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) u32 context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /* Obtain task_type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) (io_req->cmd_type == BNX2FC_ELS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) task_type = FCOE_TASK_TYPE_MIDPATH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) } else if (io_req->cmd_type == BNX2FC_ABTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) task_type = FCOE_TASK_TYPE_ABTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) /* Setup the task from io_req for easy reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) io_req->task = task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) io_req->cmd_type, task_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) /* Tx only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) (u32)mp_req->mp_req_bd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) (u32)((u64)mp_req->mp_req_bd_dma >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /* Tx Write Rx Read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /* init flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) task->txwr_rxrd.const_ctx.init_flags = task_type <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (tgt->dev_type == TYPE_TAPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) task->txwr_rxrd.const_ctx.init_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) FCOE_TASK_DEV_TYPE_TAPE <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) task->txwr_rxrd.const_ctx.init_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) FCOE_TASK_DEV_TYPE_DISK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) /* tx flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) /* Rx Write Tx Read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) /* rx flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) context_id = tgt->context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) task->rxwr_txrd.const_ctx.init_flags = context_id <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) fc_hdr = &(mp_req->req_fc_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (task_type == FCOE_TASK_TYPE_MIDPATH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = cpu_to_be16(0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) task->rxwr_txrd.var_ctx.rx_id = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
	/* Fill FC header into middle path buffer as big-endian u64 words */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) hdr[0] = cpu_to_be64(temp_hdr[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) hdr[1] = cpu_to_be64(temp_hdr[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) hdr[2] = cpu_to_be64(temp_hdr[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) /* Rx Only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (task_type == FCOE_TASK_TYPE_MIDPATH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) sgl->mul_sgl.cur_sge_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) sgl->mul_sgl.sgl_size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
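/**
 * bnx2fc_init_task - initialize a SCSI read or write task context
 *
 * @io_req: scsi io request
 * @task: task context entry to initialize
 */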
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) struct fcoe_task_ctx_entry *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) u8 task_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) struct io_bdt *bd_tbl = io_req->bd_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) struct bnx2fc_rport *tgt = io_req->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) struct fcoe_cached_sge_ctx *cached_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) struct fcoe_ext_mul_sges_ctx *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) int dev_type = tgt->dev_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) u64 *fcp_cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) u64 tmp_fcp_cmnd[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) u32 context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) int cnt, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) int bd_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) /* Setup the task from io_req for easy reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) io_req->task = task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) task_type = FCOE_TASK_TYPE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) task_type = FCOE_TASK_TYPE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) /* Tx only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) bd_count = bd_tbl->bd_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) if (task_type == FCOE_TASK_TYPE_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) cached_sge->cur_buf_addr.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) fcoe_bd_tbl->buf_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) cached_sge->cur_buf_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) fcoe_bd_tbl->buf_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) cached_sge->cur_buf_rem =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) fcoe_bd_tbl->buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) task->txwr_rxrd.const_ctx.init_flags |= 1 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) (u32)bd_tbl->bd_tbl_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) bd_tbl->bd_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
	/* Tx Write Rx Read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) /* Init state to NORMAL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) task->txwr_rxrd.const_ctx.init_flags |= task_type <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (dev_type == TYPE_TAPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) task->txwr_rxrd.const_ctx.init_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) FCOE_TASK_DEV_TYPE_TAPE <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
		io_req->rec_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) task->txwr_rxrd.const_ctx.init_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) FCOE_TASK_DEV_TYPE_DISK <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /* tx flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) /* Set initial seq counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) /* Fill FCP_CMND IU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) fcp_cmnd = (u64 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
	/* Byte-swap the FCP_CMND IU into big-endian u64 words */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) for (i = 0; i < cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) fcp_cmnd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /* Rx Write Tx Read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) context_id = tgt->context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) task->rxwr_txrd.const_ctx.init_flags = context_id <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) /* rx flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /* Set state to "waiting for the first packet" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) task->rxwr_txrd.var_ctx.rx_id = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) /* Rx Only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (task_type != FCOE_TASK_TYPE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) bd_count = bd_tbl->bd_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
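	/*
	 * For disk devices, an IO spanning one or two buffers can use the
	 * cached SGE in the task context directly; larger IOs and all tape
	 * IOs are described through the full scatter-gather list.
	 */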
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (dev_type == TYPE_DISK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (bd_count == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) task->txwr_rxrd.const_ctx.init_flags |= 1 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) } else if (bd_count == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) fcoe_bd_tbl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) cached_sge->second_buf_addr.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) fcoe_bd_tbl->buf_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) cached_sge->second_buf_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) fcoe_bd_tbl->buf_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) task->txwr_rxrd.const_ctx.init_flags |= 1 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) sgl->mul_sgl.cur_sge_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) sgl->mul_sgl.sgl_size = bd_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) sgl->mul_sgl.cur_sge_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) sgl->mul_sgl.sgl_size = bd_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * bnx2fc_setup_task_ctx - allocate and map task context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * @hba: pointer to adapter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) *
 * Allocate memory for the task contexts and the associated BD table to
 * be used by the firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) struct regpair *task_ctx_bdt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) int task_ctx_arr_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
	/*
	 * Allocate the task context BD table.  One page of BD entries can
	 * map 256 task context pages, and each of those pages holds 32
	 * task context entries, so a single BD page supports up to
	 * 256 * 32 = 8192 task context entries.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) &hba->task_ctx_bd_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (!hba->task_ctx_bd_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
	/*
	 * Allocate task_ctx, an array of pointers where each entry points
	 * to a page holding 32 task contexts.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (!hba->task_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * Allocate task_ctx_dma which is an array of dma addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) sizeof(dma_addr_t)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (!hba->task_ctx_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) for (i = 0; i < task_ctx_arr_sz; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) &hba->task_ctx_dma[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (!hba->task_ctx[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) goto out3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) addr = (u64)hba->task_ctx_dma[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) task_ctx_bdt->lo = cpu_to_le32((u32)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) task_ctx_bdt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) out3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) for (i = 0; i < task_ctx_arr_sz; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (hba->task_ctx[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) hba->task_ctx[i], hba->task_ctx_dma[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) hba->task_ctx[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) kfree(hba->task_ctx_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) hba->task_ctx_dma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) kfree(hba->task_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) hba->task_ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) hba->task_ctx_bd_tbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
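/**
 * bnx2fc_free_task_ctx - free task context memory
 *
 * @hba: pointer to adapter structure
 *
 * Free the per-page task contexts, the DMA address array and the task
 * context BD table allocated by bnx2fc_setup_task_ctx().
 */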
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) int task_ctx_arr_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (hba->task_ctx_bd_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) hba->task_ctx_bd_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) hba->task_ctx_bd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) hba->task_ctx_bd_tbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (hba->task_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) for (i = 0; i < task_ctx_arr_sz; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (hba->task_ctx[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) hba->task_ctx[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) hba->task_ctx_dma[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) hba->task_ctx[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) kfree(hba->task_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) hba->task_ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) kfree(hba->task_ctx_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) hba->task_ctx_dma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
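/**
 * bnx2fc_free_hash_table - free the connection hash table
 *
 * @hba: pointer to adapter structure
 *
 * Recover each segment's DMA address from the pbl entries, free the
 * segments, then free the pbl page itself.
 */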
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) int segment_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) u32 *pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (hba->hash_tbl_segments) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) pbl = hba->hash_tbl_pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (pbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) segment_count = hba->hash_tbl_segment_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) for (i = 0; i < segment_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) dma_addr_t dma_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) dma_address = le32_to_cpu(*pbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) ++pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) ++pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) dma_free_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) BNX2FC_HASH_TBL_CHUNK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) hba->hash_tbl_segments[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) dma_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) kfree(hba->hash_tbl_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) hba->hash_tbl_segments = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (hba->hash_tbl_pbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) hba->hash_tbl_pbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) hba->hash_tbl_pbl_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) hba->hash_tbl_pbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
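/**
 * bnx2fc_allocate_hash_table - allocate the connection hash table
 *
 * @hba: pointer to adapter structure
 *
 * Allocate the hash table in BNX2FC_HASH_TBL_CHUNK_SIZE segments and
 * record each segment's DMA address as a lo/hi pair in the pbl page.
 */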
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) int hash_table_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) int segment_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) int segment_array_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) int dma_segment_array_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) dma_addr_t *dma_segment_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) u32 *pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) sizeof(struct fcoe_hash_table_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
	segment_count = DIV_ROUND_UP(hash_table_size,
				     BNX2FC_HASH_TBL_CHUNK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) hba->hash_tbl_segment_count = segment_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (!hba->hash_tbl_segments) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) printk(KERN_ERR PFX "hash table pointers alloc failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (!dma_segment_array) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) goto cleanup_ht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) for (i = 0; i < segment_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) BNX2FC_HASH_TBL_CHUNK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) &dma_segment_array[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) if (!hba->hash_tbl_segments[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) printk(KERN_ERR PFX "hash segment alloc failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) goto cleanup_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) &hba->hash_tbl_pbl_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (!hba->hash_tbl_pbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) printk(KERN_ERR PFX "hash table pbl alloc failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) goto cleanup_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) pbl = hba->hash_tbl_pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) for (i = 0; i < segment_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) u64 paddr = dma_segment_array[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) *pbl = cpu_to_le32((u32) paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) ++pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) *pbl = cpu_to_le32((u32) (paddr >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) ++pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
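
	/*
	 * Walk the pbl again, counting address pairs until the first zero
	 * entry; the count is not used further.
	 */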
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) pbl = hba->hash_tbl_pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) while (*pbl && *(pbl + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) ++pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) ++pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) ++i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) kfree(dma_segment_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) cleanup_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) for (i = 0; i < segment_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) if (hba->hash_tbl_segments[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) dma_free_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) BNX2FC_HASH_TBL_CHUNK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) hba->hash_tbl_segments[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) dma_segment_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) kfree(dma_segment_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) cleanup_ht:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) kfree(hba->hash_tbl_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) hba->hash_tbl_segments = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) * @hba: Pointer to adapter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) u32 mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (bnx2fc_allocate_hash_table(hba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) &hba->t2_hash_tbl_ptr_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (!hba->t2_hash_tbl_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) bnx2fc_free_fw_resc(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) mem_size = BNX2FC_NUM_MAX_SESS *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) sizeof(struct fcoe_t2_hash_table_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) &hba->t2_hash_tbl_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (!hba->t2_hash_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) bnx2fc_free_fw_resc(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
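	/*
	 * Link each t2 hash table entry to the DMA address of the entry
	 * that follows it.
	 */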
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (u64) hba->t2_hash_tbl_dma +
			((i + 1) * sizeof(struct fcoe_t2_hash_table_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) hba->t2_hash_tbl[i].next.hi = addr >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) PAGE_SIZE, &hba->dummy_buf_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if (!hba->dummy_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) bnx2fc_free_fw_resc(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) &hba->stats_buf_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (!hba->stats_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) bnx2fc_free_fw_resc(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
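/**
 * bnx2fc_free_fw_resc - free firmware resources
 *
 * @hba: pointer to adapter structure
 *
 * Free the stats buffer, dummy buffer, t2 hash tables and the
 * connection hash table.
 */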
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) u32 mem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (hba->stats_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) hba->stats_buffer, hba->stats_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) hba->stats_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (hba->dummy_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) hba->dummy_buffer, hba->dummy_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) hba->dummy_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (hba->t2_hash_tbl_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) dma_free_coherent(&hba->pcidev->dev, mem_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) hba->t2_hash_tbl_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) hba->t2_hash_tbl_ptr_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) hba->t2_hash_tbl_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (hba->t2_hash_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) mem_size = BNX2FC_NUM_MAX_SESS *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) sizeof(struct fcoe_t2_hash_table_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) dma_free_coherent(&hba->pcidev->dev, mem_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) hba->t2_hash_tbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) bnx2fc_free_hash_table(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }