// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>

#include "sbp_target.h"

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};

static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};

#define SESSION_MAINTENANCE_INTERVAL	HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

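/*
 * Read the initiator's EUI-64 (GUID) from quadlets 3 and 4 of its
 * Configuration ROM bus information block.
 */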
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}

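/* Find an existing session for the given initiator GUID, or NULL. */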
static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

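/* Find the login on this session for the given LUN, or NULL. */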
static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

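/*
 * Count logins to a LUN across all sessions on the TPG; if @exclusive is
 * set, only exclusive logins are counted.
 */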
static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

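/* Find a login anywhere on the TPG by its login ID, or NULL. */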
static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

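/*
 * Check that @login_lun is exported by this TPG; *err is set to 0 if it
 * is, or -ENODEV if it is not.
 */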
static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}

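/*
 * Allocate a new session for an initiator GUID and set up the backing
 * se_session; the printed GUID string is used as the initiator name.
 */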
static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
	sess->guid = guid;

	sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
					     sizeof(struct sbp_target_request),
					     TARGET_PROT_NORMAL, guid_str,
					     sess, NULL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");
		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	return sess;
}

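/*
 * Free a session once its last login has gone; does nothing while logins
 * remain on the login list.
 */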
static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	target_remove_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

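/*
 * Release a login: unregister its command block agent, unlink it from the
 * session and drop the session itself if this was the last login.
 */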
static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *);

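/*
 * Handle a LOGIN management ORB: validate the requested LUN and login
 * policy (exclusive bit, per-LUN login limit), create or reuse a session
 * for the initiator, register the command block agent and write the login
 * response block back to the initiator.
 */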
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
			sess->node_id, sess->generation, sess->speed,
			sbp2_pointer_to_addr(&req->orb.ptr2), response,
			login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

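/*
 * Handle a RECONNECT management ORB: after a bus reset, re-associate an
 * existing login with the initiator's new node ID, generation and card.
 */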
static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

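/*
 * Handle a LOGOUT management ORB: release the identified login, provided
 * the request came from the node that owns it.
 */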
static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

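/*
 * Detect a bus reset or a card that has gone away; if the session's
 * generation is stale, invalidate its node ID and start the reconnect
 * timeout.
 */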
static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

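/*
 * The initiator failed to reconnect within reconnect_hold: release every
 * login on the session and then the session itself.
 */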
static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

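/*
 * Periodic per-session housekeeping: watch for bus resets while connected
 * and tear the session down once the reconnect window has expired.
 */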
static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}

static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

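/*
 * ORB_POINTER register: a block write records the ORB address and kicks
 * the agent work item to start processing; a block read returns the
 * current ORB pointer.
 */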
static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
		    agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
			agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

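/*
 * DOORBELL register: a write is only honoured while the agent is
 * suspended and kicks the agent work item to continue ORB processing.
 */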
static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

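/*
 * Address handler for the per-login command block agent register block.
 * Validates the request's generation and source node, then dispatches by
 * register offset (AGENT_STATE, AGENT_RESET, ORB_POINTER, DOORBELL,
 * UNSOLICITED_STATUS_ENABLE).
 */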
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) int tcode, int destination, int source, int generation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) unsigned long long offset, void *data, size_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) void *callback_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct sbp_target_agent *agent = callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct sbp_session *sess = agent->login->sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) int sess_gen, sess_node, rcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) spin_lock_bh(&sess->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) sess_gen = sess->generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) sess_node = sess->node_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) spin_unlock_bh(&sess->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (generation != sess_gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) pr_notice("ignoring request with wrong generation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) rcode = RCODE_TYPE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (source != sess_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) pr_notice("ignoring request from foreign node (%x != %x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) source, sess_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) rcode = RCODE_TYPE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* turn offset into the offset from the start of the block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) offset -= agent->handler.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (offset == 0x00 && length == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /* AGENT_STATE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) } else if (offset == 0x04 && length == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /* AGENT_RESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) } else if (offset == 0x08 && length == 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /* ORB_POINTER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) } else if (offset == 0x10 && length == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* DOORBELL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) } else if (offset == 0x14 && length == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /* UNSOLICITED_STATUS_ENABLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) data, agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) rcode = RCODE_ADDRESS_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) fw_send_response(card, request, rcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) static void sbp_handle_command(struct sbp_target_request *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static int sbp_send_status(struct sbp_target_request *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) static void sbp_free_request(struct sbp_target_request *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
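/*
 * Per-request work item: decode the rq_fmt field of a fetched ORB and either
 * hand the command to sbp_handle_command(), complete a dummy ORB, or reject
 * reserved/vendor formats with a REQ_TYPE_NOTSUPP status block.
 */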
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static void tgt_agent_process_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct sbp_target_request *req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) container_of(work, struct sbp_target_request, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) req->orb_pointer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) sbp2_pointer_to_addr(&req->orb.next_orb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) sbp2_pointer_to_addr(&req->orb.data_descriptor),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) be32_to_cpu(req->orb.misc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (req->orb_pointer >> 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) pr_debug("ORB with high bits set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) sbp_handle_command(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) case 1: /* Reserved for future standardization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) case 2: /* Vendor-dependent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) req->status.status |= cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) STATUS_BLOCK_RESP(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) STATUS_BLOCK_DEAD(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) STATUS_BLOCK_LEN(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) STATUS_BLOCK_SBP_STATUS(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) SBP_STATUS_REQ_TYPE_NOTSUPP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) sbp_send_status(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) case 3: /* Dummy ORB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) req->status.status |= cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) STATUS_BLOCK_RESP(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) STATUS_BLOCK_DEAD(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) STATUS_BLOCK_LEN(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) STATUS_BLOCK_SBP_STATUS(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) SBP_STATUS_DUMMY_ORB_COMPLETE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) sbp_send_status(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* used to double-check we haven't been issued an AGENT_RESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) bool active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) spin_lock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) active = (agent->state == AGENT_STATE_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) spin_unlock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
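/*
 * Allocate a request from the session's preallocated tag pool; the returned
 * entry is zeroed and stamped with its pool tag/cpu before use.
 */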
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct fw_card *card, u64 next_orb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct se_session *se_sess = sess->se_sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct sbp_target_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) int tag, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (tag < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) memset(req, 0, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) req->se_cmd.map_tag = tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) req->se_cmd.map_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) req->se_cmd.tag = next_orb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
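/*
 * Fetch work: walk the initiator's linked list of ORBs. Each iteration reads
 * one ORB at agent->orb_pointer and queues tgt_agent_process_work() for it
 * (after a DOORBELL the first ORB is only used to pick up its next_ORB, since
 * it was already processed), then follows next_ORB until its high bit marks
 * the end of the list or the agent leaves AGENT_STATE_ACTIVE.
 */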
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static void tgt_agent_fetch_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct sbp_target_agent *agent =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) container_of(work, struct sbp_target_agent, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct sbp_session *sess = agent->login->sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct sbp_target_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) bool doorbell = agent->doorbell;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) u64 next_orb = agent->orb_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) while (next_orb && tgt_agent_check_active(agent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) req = sbp_mgt_get_req(sess, sess->card, next_orb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (IS_ERR(req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) spin_lock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) agent->state = AGENT_STATE_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) spin_unlock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) req->login = agent->login;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) req->orb_pointer = next_orb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) req->orb_pointer >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) req->status.orb_low = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) req->orb_pointer & 0xfffffffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /* read in the ORB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) sess->node_id, sess->generation, sess->speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) req->orb_pointer, &req->orb, sizeof(req->orb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (ret != RCODE_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) pr_debug("tgt_orb fetch failed: %x\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) req->status.status |= cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) STATUS_BLOCK_SRC(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) STATUS_SRC_ORB_FINISHED) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) STATUS_BLOCK_RESP(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) STATUS_RESP_TRANSPORT_FAILURE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) STATUS_BLOCK_DEAD(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) STATUS_BLOCK_LEN(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) STATUS_BLOCK_SBP_STATUS(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) SBP_STATUS_UNSPECIFIED_ERROR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) spin_lock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) agent->state = AGENT_STATE_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) spin_unlock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) sbp_send_status(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* check the next_ORB field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) next_orb = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) STATUS_SRC_ORB_FINISHED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) STATUS_SRC_ORB_CONTINUING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (tgt_agent_check_active(agent) && !doorbell) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) INIT_WORK(&req->work, tgt_agent_process_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) queue_work(system_unbound_wq, &req->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /* don't process this request, just check next_ORB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) sbp_free_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) spin_lock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) doorbell = agent->doorbell = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /* check if we should carry on processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (next_orb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) agent->orb_pointer = next_orb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) agent->state = AGENT_STATE_SUSPENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) spin_unlock_bh(&agent->lock);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static struct sbp_target_agent *sbp_target_agent_register(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct sbp_login_descriptor *login)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) struct sbp_target_agent *agent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) agent = kmalloc(sizeof(*agent), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (!agent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) spin_lock_init(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) agent->handler.length = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) agent->handler.address_callback = tgt_agent_rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) agent->handler.callback_data = agent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) agent->login = login;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) agent->state = AGENT_STATE_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) INIT_WORK(&agent->work, tgt_agent_fetch_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) agent->orb_pointer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) agent->doorbell = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) ret = fw_core_add_address_handler(&agent->handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) &sbp_register_region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) kfree(agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return agent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) fw_core_remove_address_handler(&agent->handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) cancel_work_sync(&agent->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) kfree(agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with a short, quadratically growing
 * delay between attempts.
 */
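/* Lower-bound delays per retry: 5, 20, 45, 80, 125 us (usleep_range may double them). */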
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) int generation, int speed, unsigned long long offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) void *payload, size_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) int attempt, ret, delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) for (attempt = 1; attempt <= 5; attempt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) ret = fw_run_transaction(card, tcode, destination_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) generation, speed, offset, payload, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) case RCODE_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) case RCODE_TYPE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) case RCODE_ADDRESS_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) case RCODE_GENERATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) delay = 5 * attempt * attempt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) usleep_range(delay, delay * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * Wrapper around sbp_run_transaction that gets the card, destination,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * generation and speed out of the request's session.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static int sbp_run_request_transaction(struct sbp_target_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) int tcode, unsigned long long offset, void *payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) size_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct sbp_login_descriptor *login = req->login;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct sbp_session *sess = login->sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct fw_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) int node_id, generation, speed, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) spin_lock_bh(&sess->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) card = fw_card_get(sess->card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) node_id = sess->node_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) generation = sess->generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) speed = sess->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) spin_unlock_bh(&sess->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) offset, payload, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) fw_card_put(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
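/*
 * Copy the part of the CDB embedded in the ORB's command_block field; if
 * scsi_command_size() says the CDB is longer, read the remainder from the
 * initiator's memory immediately following the ORB.
 */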
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static int sbp_fetch_command(struct sbp_target_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) int ret, cmd_len, copy_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) cmd_len = scsi_command_size(req->orb.command_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (!req->cmd_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) memcpy(req->cmd_buf, req->orb.command_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) min_t(int, cmd_len, sizeof(req->orb.command_block)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (cmd_len > sizeof(req->orb.command_block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) pr_debug("sbp_fetch_command: filling in long command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) copy_len = cmd_len - sizeof(req->orb.command_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) ret = sbp_run_request_transaction(req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) TCODE_READ_BLOCK_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) req->orb_pointer + sizeof(req->orb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) req->cmd_buf + sizeof(req->orb.command_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) copy_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (ret != RCODE_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
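/*
 * If the ORB's page-table-present bit is set, data_descriptor points at a
 * table of data_size entries (segment length + 48-bit segment base) which is
 * read from the initiator and kept in req->pg_tbl for the data phase.
 */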
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static int sbp_fetch_page_table(struct sbp_target_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) int pg_tbl_sz, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct sbp_page_table_entry *pg_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) sizeof(struct sbp_page_table_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (!pg_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) sbp2_pointer_to_addr(&req->orb.data_descriptor),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) pg_tbl, pg_tbl_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (ret != RCODE_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) kfree(pg_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) req->pg_tbl = pg_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
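/*
 * Derive the transfer length and DMA direction from the ORB: data_size == 0
 * means no data phase; otherwise the direction bit selects whether data flows
 * to or from the initiator, and the length is either the sum of the
 * page-table segment lengths or, without a page table, data_size itself.
 */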
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static void sbp_calc_data_length_direction(struct sbp_target_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) u32 *data_len, enum dma_data_direction *data_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) int data_size, direction, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (!data_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) *data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) *data_dir = DMA_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) *data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (req->pg_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) *data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) for (idx = 0; idx < data_size; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) *data_len += be16_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) req->pg_tbl[idx].segment_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) *data_len = data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
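/*
 * Main command path for a fetched ORB: pull in the CDB and page table, work
 * out length/direction, then submit the command to the target core via
 * target_submit_cmd(); any failure on the way reports a TRANSPORT_FAILURE
 * status block back to the initiator.
 */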
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static void sbp_handle_command(struct sbp_target_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct sbp_login_descriptor *login = req->login;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct sbp_session *sess = login->sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) int ret, unpacked_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) u32 data_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) enum dma_data_direction data_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) ret = sbp_fetch_command(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) ret = sbp_fetch_page_table(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) pr_debug("sbp_handle_command: fetch page table failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) unpacked_lun = req->login->login_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) sbp_calc_data_length_direction(req, &data_length, &data_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) req->orb_pointer, unpacked_lun, data_length, data_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* only used for printk until we do TMRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) req->se_cmd.tag = req->orb_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) req->sense_buf, unpacked_lun, data_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) req->status.status |= cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) STATUS_BLOCK_DEAD(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) STATUS_BLOCK_LEN(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) sbp_send_status(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * DMA_FROM_DEVICE = write to initiator (SCSI READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) */
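/*
 * Data is moved in chunks: each FireWire transaction carries at most
 * min(remaining segment length, max_payload from the ORB, current SG chunk).
 */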
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static int sbp_rw_data(struct sbp_target_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct sbp_session *sess = req->login->sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) generation, num_pte, length, tfr_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) rcode = RCODE_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct sbp_page_table_entry *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) unsigned long long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct fw_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) struct sg_mapping_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) tcode = TCODE_WRITE_BLOCK_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) sg_miter_flags = SG_MITER_FROM_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) tcode = TCODE_READ_BLOCK_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) sg_miter_flags = SG_MITER_TO_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (pg_size) {
		pr_err("sbp_rw_data: page size ignored\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) pg_size = 0x100 << pg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) spin_lock_bh(&sess->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) card = fw_card_get(sess->card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) node_id = sess->node_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) generation = sess->generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) spin_unlock_bh(&sess->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (req->pg_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) pte = req->pg_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) pte = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) num_pte = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) length = req->se_cmd.data_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) sg_miter_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) while (length || num_pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (!length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) be32_to_cpu(pte->segment_base_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) length = be16_to_cpu(pte->segment_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) pte++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) num_pte--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) sg_miter_next(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) tfr_length = min3(length, max_payload, (int)iter.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /* FIXME: take page_size into account */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) rcode = sbp_run_transaction(card, tcode, node_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) generation, speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) offset, iter.addr, tfr_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (rcode != RCODE_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) length -= tfr_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) offset += tfr_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) iter.consumed = tfr_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) sg_miter_stop(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) fw_card_put(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (rcode == RCODE_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) WARN_ON(length != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
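/*
 * Write the status block to the login's status FIFO. The length comes from
 * the LEN field already encoded in status.status: LEN(1) sends just the
 * 8-byte header, LEN(5) adds the 16 bytes of sense data filled in by
 * sbp_sense_mangle().
 */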
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) static int sbp_send_status(struct sbp_target_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) int rc, ret = 0, length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) struct sbp_login_descriptor *login = req->login;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) login->status_fifo_addr, &req->status, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (rc != RCODE_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) goto put_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) req->orb_pointer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * Drop the extra ACK_KREF reference taken by target_submit_cmd()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * final se_cmd->cmd_kref put.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) put_ref:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) target_put_sess_cmd(&req->se_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
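/*
 * Repack fixed-format SCSI sense data into the SBP-2 status block layout.
 * Descriptor-format sense (0x72/0x73) is not translated; those commands are
 * reported as REQUEST_ABORTED instead.
 */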
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) static void sbp_sense_mangle(struct sbp_target_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct se_cmd *se_cmd = &req->se_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) u8 *sense = req->sense_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) u8 *status = req->status.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) WARN_ON(se_cmd->scsi_sense_length < 18);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) switch (sense[0] & 0x7f) { /* sfmt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) case 0x70: /* current, fixed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) status[0] = 0 << 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) case 0x71: /* deferred, fixed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) status[0] = 1 << 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) case 0x72: /* current, descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) case 0x73: /* deferred, descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * TODO: SBP-3 specifies what we should do with descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * format sense data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) sense[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) req->status.status |= cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) STATUS_BLOCK_DEAD(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) STATUS_BLOCK_LEN(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) status[0] |= se_cmd->scsi_status & 0x3f;/* status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) status[1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) (sense[0] & 0x80) | /* valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) (sense[2] & 0x0f); /* sense_key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) status[2] = se_cmd->scsi_asc; /* sense_code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) status[3] = se_cmd->scsi_ascq; /* sense_qualifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) /* information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) status[4] = sense[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) status[5] = sense[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) status[6] = sense[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) status[7] = sense[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /* CDB-dependent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) status[8] = sense[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) status[9] = sense[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) status[10] = sense[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) status[11] = sense[11];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /* fru */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) status[12] = sense[14];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* sense_key-dependent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) status[13] = sense[15];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) status[14] = sense[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) status[15] = sense[17];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) req->status.status |= cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) STATUS_BLOCK_DEAD(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) STATUS_BLOCK_LEN(5) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static int sbp_send_sense(struct sbp_target_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct se_cmd *se_cmd = &req->se_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (se_cmd->scsi_sense_length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) sbp_sense_mangle(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) req->status.status |= cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) STATUS_BLOCK_DEAD(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) STATUS_BLOCK_LEN(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return sbp_send_status(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static void sbp_free_request(struct sbp_target_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct se_cmd *se_cmd = &req->se_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) struct se_session *se_sess = se_cmd->se_sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) kfree(req->pg_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) kfree(req->cmd_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) target_free_tag(se_sess, se_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
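/*
 * Management agent work item: read the management ORB from the initiator,
 * dispatch on its function code (LOGIN, QUERY LOGINS, RECONNECT and LOGOUT
 * are implemented; everything else returns REQ_TYPE_NOTSUPP), then write the
 * resulting status block to the ORB's status_FIFO address.
 */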
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) static void sbp_mgt_agent_process(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) struct sbp_management_agent *agent =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) container_of(work, struct sbp_management_agent, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct sbp_management_request *req = agent->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) int status_data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) /* fetch the ORB from the initiator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) req->node_addr, req->generation, req->speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) agent->orb_offset, &req->orb, sizeof(req->orb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (ret != RCODE_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) pr_debug("mgt_orb fetch failed: %x\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) sbp2_pointer_to_addr(&req->orb.ptr1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) sbp2_pointer_to_addr(&req->orb.ptr2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) sbp2_pointer_to_addr(&req->orb.status_fifo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) pr_err("mgt_orb bad request\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) case MANAGEMENT_ORB_FUNCTION_LOGIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) sbp_management_request_login(agent, req, &status_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) sbp_management_request_query_logins(agent, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) &status_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) case MANAGEMENT_ORB_FUNCTION_RECONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) sbp_management_request_reconnect(agent, req, &status_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) pr_notice("SET PASSWORD not implemented\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) req->status.status = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) case MANAGEMENT_ORB_FUNCTION_LOGOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) sbp_management_request_logout(agent, req, &status_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) pr_notice("ABORT TASK not implemented\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) req->status.status = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) pr_notice("ABORT TASK SET not implemented\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) req->status.status = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) pr_notice("LOGICAL UNIT RESET not implemented\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) req->status.status = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) pr_notice("TARGET RESET not implemented\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) req->status.status = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) pr_notice("unknown management function 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) req->status.status = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) req->status.status |= cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) req->status.orb_low = cpu_to_be32(agent->orb_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) /* write the status block back to the initiator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) req->node_addr, req->generation, req->speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) sbp2_pointer_to_addr(&req->orb.status_fifo),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) &req->status, 8 + status_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (ret != RCODE_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) pr_debug("mgt_orb status write failed: %x\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) fw_card_put(req->card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) spin_lock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) agent->state = MANAGEMENT_AGENT_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) spin_unlock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
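/*
 * Address handler for the MANAGEMENT_AGENT register: an 8-byte block write of
 * an ORB pointer kicks off sbp_mgt_agent_process() (one request at a time),
 * a block read returns the last ORB pointer, and anything else is rejected.
 */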
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) static void sbp_mgt_agent_rw(struct fw_card *card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct fw_request *request, int tcode, int destination, int source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) int generation, unsigned long long offset, void *data, size_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) void *callback_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct sbp_management_agent *agent = callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) struct sbp2_pointer *ptr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) int rcode = RCODE_ADDRESS_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (!agent->tport->enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if ((offset != agent->handler.offset) || (length != 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) struct sbp_management_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int prev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) spin_lock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) prev_state = agent->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) agent->state = MANAGEMENT_AGENT_STATE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) spin_unlock_bh(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) pr_notice("ignoring management request while busy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) rcode = RCODE_CONFLICT_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			/* no work was queued, so drop back to IDLE ourselves */
			spin_lock_bh(&agent->lock);
			agent->state = MANAGEMENT_AGENT_STATE_IDLE;
			spin_unlock_bh(&agent->lock);
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) req->card = fw_card_get(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) req->generation = generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) req->node_addr = source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) req->speed = fw_get_request_speed(request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) agent->orb_offset = sbp2_pointer_to_addr(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) agent->request = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) queue_work(system_unbound_wq, &agent->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) rcode = RCODE_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) } else if (tcode == TCODE_READ_BLOCK_REQUEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) addr_to_sbp2_pointer(agent->orb_offset, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) rcode = RCODE_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) rcode = RCODE_TYPE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) fw_send_response(card, request, rcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
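/*
 * Allocate a management agent for a target port and register its 8-byte
 * MANAGEMENT_AGENT register somewhere inside sbp_register_region; the
 * firewire core picks the actual CSR offset, which is later advertised via
 * the management_agent entry of the unit directory.
 */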
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) static struct sbp_management_agent *sbp_management_agent_register(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct sbp_tport *tport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) struct sbp_management_agent *agent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) agent = kmalloc(sizeof(*agent), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (!agent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) spin_lock_init(&agent->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) agent->tport = tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) agent->handler.length = 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) agent->handler.address_callback = sbp_mgt_agent_rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) agent->handler.callback_data = agent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) agent->state = MANAGEMENT_AGENT_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) INIT_WORK(&agent->work, sbp_mgt_agent_process);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) agent->orb_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) agent->request = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) ret = fw_core_add_address_handler(&agent->handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) &sbp_register_region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) kfree(agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) return agent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) fw_core_remove_address_handler(&agent->handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) cancel_work_sync(&agent->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) kfree(agent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) static int sbp_check_true(struct se_portal_group *se_tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) static int sbp_check_false(struct se_portal_group *se_tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) return &tport->tport_name[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) static u16 sbp_get_tag(struct se_portal_group *se_tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) return tpg->tport_tpgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) static void sbp_release_cmd(struct se_cmd *se_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) struct sbp_target_request *req = container_of(se_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct sbp_target_request, se_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) sbp_free_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) static u32 sbp_sess_get_index(struct se_session *se_sess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
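/*
 * Fabric write_pending callback: fetch the WRITE payload from the initiator
 * over the bus (sbp_rw_data), then hand the command to the backend with
 * target_execute_cmd().  A transfer failure is reported to the initiator as
 * a transport-failure status block.
 */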
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) static int sbp_write_pending(struct se_cmd *se_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) struct sbp_target_request *req = container_of(se_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) struct sbp_target_request, se_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) ret = sbp_rw_data(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) req->status.status |= cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) STATUS_BLOCK_RESP(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) STATUS_RESP_TRANSPORT_FAILURE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) STATUS_BLOCK_DEAD(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) STATUS_BLOCK_LEN(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) STATUS_BLOCK_SBP_STATUS(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) SBP_STATUS_UNSPECIFIED_ERROR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) sbp_send_status(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) target_execute_cmd(se_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) static int sbp_get_cmd_state(struct se_cmd *se_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
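/*
 * Fabric queue_data_in callback: push the READ payload to the initiator,
 * then send the completion status block (including any sense data) via
 * sbp_send_sense().
 */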
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) static int sbp_queue_data_in(struct se_cmd *se_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) struct sbp_target_request *req = container_of(se_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) struct sbp_target_request, se_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) ret = sbp_rw_data(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) req->status.status |= cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) STATUS_BLOCK_DEAD(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) STATUS_BLOCK_LEN(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) sbp_send_status(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) return sbp_send_sense(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * Called after command (no data transfer) or after the write (to device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * operation is completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) static int sbp_queue_status(struct se_cmd *se_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) struct sbp_target_request *req = container_of(se_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) struct sbp_target_request, se_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) return sbp_send_sense(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
static void sbp_aborted_task(struct se_cmd *se_cmd)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static int sbp_check_stop_free(struct se_cmd *se_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) struct sbp_target_request *req = container_of(se_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) struct sbp_target_request, se_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return transport_generic_free_cmd(&req->se_cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) struct se_lun *lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
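/*
 * Rebuild the SBP-2 unit directory exported in the local node's config ROM.
 * Each directory entry is one quadlet: an 8-bit key in the top byte and a
 * 24-bit immediate value or offset below it.  The directory is removed (and
 * not re-added) while the target port is disabled or has no TPG.
 */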
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static int sbp_update_unit_directory(struct sbp_tport *tport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) struct se_lun *lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) u32 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (tport->unit_directory.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) fw_core_remove_descriptor(&tport->unit_directory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) kfree(tport->unit_directory.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) tport->unit_directory.data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (!tport->enable || !tport->tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) * Number of entries in the final unit directory:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * - all of those in the template
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * - management_agent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * - unit_characteristics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * - reconnect_timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * - unit unique ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * - one for each LUN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) * MUST NOT include leaf or sub-directory entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (tport->directory_id != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) num_entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /* allocate num_entries + 4 for the header and unique ID leaf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /* directory_length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) data[idx++] = num_entries << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) /* directory_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (tport->directory_id != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) /* unit directory template */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) memcpy(&data[idx], sbp_unit_directory_template,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) sizeof(sbp_unit_directory_template));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) idx += ARRAY_SIZE(sbp_unit_directory_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) /* management_agent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) /* unit_characteristics */
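	/*
	 * The mgt_orb_timeout attribute appears to be kept in seconds and is
	 * converted here to the 500 ms units the SBP-2 unit_characteristics
	 * entry expects (hence the * 2), packed alongside the ORB fetch size.
	 */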
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) data[idx++] = 0x3a000000 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) (((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) SBP_ORB_FETCH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /* reconnect_timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) /* unit unique ID (leaf is just after LUNs) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) data[idx++] = 0x8d000000 | (num_luns + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) struct se_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) * rcu_dereference_raw protected by se_lun->lun_group symlink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) * reference to se_device->dev_group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) dev = rcu_dereference_raw(lun->lun_se_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) type = dev->transport->get_device_type(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) /* logical_unit_number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) data[idx++] = 0x14000000 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) ((type << 16) & 0x1f0000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) (lun->unpacked_lun & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /* unit unique ID leaf */
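	/*
	 * The unit_unique_id entry above points at this leaf (payload length
	 * of two quadlets) holding the target's EUI-64 split into high and
	 * low quadlets.
	 */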
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) data[idx++] = 2 << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) data[idx++] = tport->guid >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) data[idx++] = tport->guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) tport->unit_directory.length = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) tport->unit_directory.data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) ret = fw_core_add_descriptor(&tport->unit_directory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) kfree(tport->unit_directory.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) tport->unit_directory.data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
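/*
 * Parse a WWN of exactly 16 hex digits (optionally followed by a newline),
 * e.g. "0001020304050607", into a 64-bit GUID.  Returns the number of
 * characters consumed, or -1 with a diagnostic on malformed input.
 */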
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) const char *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) char c, nibble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) int pos = 0, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) *wwn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) c = *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (c == '\n' && cp[1] == '\0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (c == '\0') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) err = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) if (pos != 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return cp - name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) err = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (isdigit(c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) nibble = c - '0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) else if (isxdigit(c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) nibble = tolower(c) - 'a' + 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) *wwn = (*wwn << 4) | nibble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) pos++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) err = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) fail:
	pr_info("err %d len %td pos %d\n",
			err, cp - name, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return snprintf(buf, len, "%016llx", wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) u64 guid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (sbp_parse_wwn(name, &guid) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) static int sbp_post_link_lun(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) struct se_portal_group *se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) struct se_lun *se_lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) return sbp_update_unit_directory(tpg->tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) static void sbp_pre_unlink_lun(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct se_portal_group *se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) struct se_lun *se_lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) tport->enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) ret = sbp_update_unit_directory(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) pr_err("unlink LUN: failed to update unit directory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) struct sbp_tport *tport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) container_of(wwn, struct sbp_tport, tport_wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) struct sbp_tpg *tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) unsigned long tpgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (strstr(name, "tpgt_") != name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (tport->tpg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) pr_err("Only one TPG per Unit is possible.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) return ERR_PTR(-EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (!tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) tpg->tport = tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) tpg->tport_tpgt = tpgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) tport->tpg = tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) /* default attribute values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) tport->enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) tport->directory_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) tport->mgt_orb_timeout = 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) tport->max_reconnect_timeout = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) tport->max_logins_per_lun = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) tport->mgt_agt = sbp_management_agent_register(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (IS_ERR(tport->mgt_agt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) ret = PTR_ERR(tport->mgt_agt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) goto out_free_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) goto out_unreg_mgt_agt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) return &tpg->se_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) out_unreg_mgt_agt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) sbp_management_agent_unregister(tport->mgt_agt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) out_free_tpg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) tport->tpg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) kfree(tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) static void sbp_drop_tpg(struct se_portal_group *se_tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) core_tpg_deregister(se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) sbp_management_agent_unregister(tport->mgt_agt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) tport->tpg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) kfree(tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static struct se_wwn *sbp_make_tport(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) struct target_fabric_configfs *tf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) struct config_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) struct sbp_tport *tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) u64 guid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) if (sbp_parse_wwn(name, &guid) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) tport = kzalloc(sizeof(*tport), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (!tport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) tport->guid = guid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return &tport->tport_wwn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) static void sbp_drop_tport(struct se_wwn *wwn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct sbp_tport *tport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) container_of(wwn, struct sbp_tport, tport_wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) kfree(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) CONFIGFS_ATTR_RO(sbp_wwn_, version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) static struct configfs_attribute *sbp_wwn_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) &sbp_wwn_attr_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) struct se_portal_group *se_tpg = to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (tport->directory_id == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) return sprintf(page, "implicit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) return sprintf(page, "%06x\n", tport->directory_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) const char *page, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) struct se_portal_group *se_tpg = to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (tport->enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) pr_err("Cannot change the directory_id on an active target.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (strstr(page, "implicit") == page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) tport->directory_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (kstrtoul(page, 16, &val) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (val > 0xffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) tport->directory_id = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) struct se_portal_group *se_tpg = to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) return sprintf(page, "%d\n", tport->enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
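/*
 * Writing 1 publishes the unit directory in the local config ROM so SBP-2
 * initiators on the bus can discover and log into the target; writing 0
 * withdraws it, and is refused with -EBUSY while sessions are still active.
 * Illustrative configfs usage (assuming the default mount point and that at
 * least one LUN has already been linked under the TPG):
 *
 *   echo 1 > /sys/kernel/config/target/sbp/<guid>/tpgt_1/enable
 */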
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) static ssize_t sbp_tpg_enable_store(struct config_item *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) const char *page, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct se_portal_group *se_tpg = to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) if (kstrtoul(page, 0, &val) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if ((val != 0) && (val != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (tport->enable == val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) pr_err("Cannot enable a target with no LUNs!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) /* XXX: force-shutdown sessions instead? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) spin_lock_bh(&se_tpg->session_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (!list_empty(&se_tpg->tpg_sess_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) spin_unlock_bh(&se_tpg->session_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) spin_unlock_bh(&se_tpg->session_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) tport->enable = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) ret = sbp_update_unit_directory(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) pr_err("Could not update Config ROM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) CONFIGFS_ATTR(sbp_tpg_, directory_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) CONFIGFS_ATTR(sbp_tpg_, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) static struct configfs_attribute *sbp_tpg_base_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) &sbp_tpg_attr_directory_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) &sbp_tpg_attr_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) struct se_portal_group *se_tpg = attrib_to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return sprintf(page, "%d\n", tport->mgt_orb_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) const char *page, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) struct se_portal_group *se_tpg = attrib_to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (kstrtoul(page, 0, &val) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if ((val < 1) || (val > 127))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) if (tport->mgt_orb_timeout == val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) tport->mgt_orb_timeout = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) ret = sbp_update_unit_directory(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) struct se_portal_group *se_tpg = attrib_to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) return sprintf(page, "%d\n", tport->max_reconnect_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) const char *page, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) struct se_portal_group *se_tpg = attrib_to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (kstrtoul(page, 0, &val) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) if ((val < 1) || (val > 32767))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (tport->max_reconnect_timeout == val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) tport->max_reconnect_timeout = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) ret = sbp_update_unit_directory(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) struct se_portal_group *se_tpg = attrib_to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) return sprintf(page, "%d\n", tport->max_logins_per_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) const char *page, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) struct se_portal_group *se_tpg = attrib_to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) struct sbp_tport *tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if (kstrtoul(page, 0, &val) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if ((val < 1) || (val > 127))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) /* XXX: also check against current count? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) tport->max_logins_per_lun = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) &sbp_tpg_attrib_attr_mgt_orb_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) &sbp_tpg_attrib_attr_max_reconnect_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) &sbp_tpg_attrib_attr_max_logins_per_lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
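/*
 * Fabric ops: demo mode is reported as enabled, so initiators may log in
 * without an explicit node ACL (sbp_init_nodeacl still allows creating one),
 * and both demo- and production-mode write protection are off.  No fabric
 * portal support is provided (fabric_make_np/fabric_drop_np are NULL).
 */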
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) static const struct target_core_fabric_ops sbp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) .fabric_name = "sbp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) .tpg_get_wwn = sbp_get_fabric_wwn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) .tpg_get_tag = sbp_get_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) .tpg_check_demo_mode = sbp_check_true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) .tpg_check_demo_mode_cache = sbp_check_true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) .tpg_check_demo_mode_write_protect = sbp_check_false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) .tpg_check_prod_mode_write_protect = sbp_check_false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) .tpg_get_inst_index = sbp_tpg_get_inst_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) .release_cmd = sbp_release_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) .sess_get_index = sbp_sess_get_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) .write_pending = sbp_write_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) .set_default_node_attributes = sbp_set_default_node_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) .get_cmd_state = sbp_get_cmd_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) .queue_data_in = sbp_queue_data_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) .queue_status = sbp_queue_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) .queue_tm_rsp = sbp_queue_tm_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) .aborted_task = sbp_aborted_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) .check_stop_free = sbp_check_stop_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) .fabric_make_wwn = sbp_make_tport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) .fabric_drop_wwn = sbp_drop_tport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) .fabric_make_tpg = sbp_make_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) .fabric_drop_tpg = sbp_drop_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) .fabric_post_link = sbp_post_link_lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) .fabric_pre_unlink = sbp_pre_unlink_lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) .fabric_make_np = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) .fabric_drop_np = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) .fabric_init_nodeacl = sbp_init_nodeacl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) .tfc_wwn_attrs = sbp_wwn_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) .tfc_tpg_base_attrs = sbp_tpg_base_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) static int __init sbp_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return target_register_template(&sbp_ops);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) static void __exit sbp_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) target_unregister_template(&sbp_ops);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) MODULE_DESCRIPTION("FireWire SBP fabric driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) module_init(sbp_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) module_exit(sbp_exit);