^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2016 Avago Technologies. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/parser.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <uapi/scsi/fc/fc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "../host/nvme.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include "../target/nvmet.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/nvme-fc-driver.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/nvme-fc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) NVMF_OPT_ERR = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) NVMF_OPT_WWNN = 1 << 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) NVMF_OPT_WWPN = 1 << 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) NVMF_OPT_ROLES = 1 << 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) NVMF_OPT_FCADDR = 1 << 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) NVMF_OPT_LPWWNN = 1 << 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) NVMF_OPT_LPWWPN = 1 << 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) struct fcloop_ctrl_options {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) int mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) u64 wwnn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) u64 wwpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) u32 roles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) u32 fcaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) u64 lpwwnn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) u64 lpwwpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) static const match_table_t opt_tokens = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) { NVMF_OPT_WWNN, "wwnn=%s" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) { NVMF_OPT_WWPN, "wwpn=%s" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) { NVMF_OPT_ROLES, "roles=%d" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) { NVMF_OPT_FCADDR, "fcaddr=%x" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) { NVMF_OPT_LPWWNN, "lpwwnn=%s" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) { NVMF_OPT_LPWWPN, "lpwwpn=%s" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) { NVMF_OPT_ERR, NULL }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) static int fcloop_verify_addr(substring_t *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) size_t blen = s->to - s->from + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) strncmp(s->from, "0x", 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) fcloop_parse_options(struct fcloop_ctrl_options *opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) const char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) substring_t args[MAX_OPT_ARGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) char *options, *o, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) int token, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) u64 token64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) options = o = kstrdup(buf, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) if (!options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) while ((p = strsep(&o, ",\n")) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) if (!*p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) token = match_token(p, opt_tokens, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) opts->mask |= token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) switch (token) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) case NVMF_OPT_WWNN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) if (fcloop_verify_addr(args) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) match_u64(args, &token64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) goto out_free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) opts->wwnn = token64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) case NVMF_OPT_WWPN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) if (fcloop_verify_addr(args) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) match_u64(args, &token64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) goto out_free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) opts->wwpn = token64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) case NVMF_OPT_ROLES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) if (match_int(args, &token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) goto out_free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) opts->roles = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) case NVMF_OPT_FCADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) if (match_hex(args, &token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) goto out_free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) opts->fcaddr = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) case NVMF_OPT_LPWWNN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) if (fcloop_verify_addr(args) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) match_u64(args, &token64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) goto out_free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) opts->lpwwnn = token64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) case NVMF_OPT_LPWWPN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) if (fcloop_verify_addr(args) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) match_u64(args, &token64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) goto out_free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) opts->lpwwpn = token64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) pr_warn("unknown parameter or missing value '%s'\n", p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) goto out_free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) out_free_options:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) kfree(options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) const char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) substring_t args[MAX_OPT_ARGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) char *options, *o, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) int token, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) u64 token64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) *nname = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) *pname = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) options = o = kstrdup(buf, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) if (!options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) while ((p = strsep(&o, ",\n")) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) if (!*p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) token = match_token(p, opt_tokens, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) switch (token) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) case NVMF_OPT_WWNN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) if (fcloop_verify_addr(args) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) match_u64(args, &token64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) goto out_free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) *nname = token64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) case NVMF_OPT_WWPN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) if (fcloop_verify_addr(args) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) match_u64(args, &token64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) goto out_free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) *pname = token64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) pr_warn("unknown parameter or missing value '%s'\n", p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) goto out_free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) out_free_options:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) kfree(options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) if (*nname == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if (*pname == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) #define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) #define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) #define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) static DEFINE_SPINLOCK(fcloop_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) static LIST_HEAD(fcloop_lports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static LIST_HEAD(fcloop_nports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) struct fcloop_lport {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) struct nvme_fc_local_port *localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) struct list_head lport_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) struct completion unreg_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) struct fcloop_lport_priv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) struct fcloop_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) struct fcloop_rport {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct nvme_fc_remote_port *remoteport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) struct nvmet_fc_target_port *targetport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) struct fcloop_nport *nport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) struct fcloop_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) struct list_head ls_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) struct work_struct ls_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) struct fcloop_tport {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) struct nvmet_fc_target_port *targetport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) struct nvme_fc_remote_port *remoteport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) struct fcloop_nport *nport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) struct fcloop_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) struct list_head ls_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) struct work_struct ls_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) struct fcloop_nport {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) struct fcloop_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) struct fcloop_tport *tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) struct fcloop_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) struct list_head nport_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) struct kref ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) u64 node_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) u64 port_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) u32 port_role;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) u32 port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) struct fcloop_lsreq {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) struct nvmefc_ls_req *lsreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) struct nvmefc_ls_rsp ls_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) int lsdir; /* H2T or T2H */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) struct list_head ls_list; /* fcloop_rport->ls_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) struct fcloop_rscn {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) struct fcloop_tport *tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) INI_IO_START = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) INI_IO_ACTIVE = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) INI_IO_ABORTED = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) INI_IO_COMPLETED = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) struct fcloop_fcpreq {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) struct fcloop_tport *tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) struct nvmefc_fcp_req *fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) spinlock_t reqlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) u32 inistate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) bool active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) bool aborted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) struct kref ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) struct work_struct fcp_rcv_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) struct work_struct abort_rcv_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) struct work_struct tio_done_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) struct nvmefc_tgt_fcp_req tgt_fcp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) struct fcloop_ini_fcpreq {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) struct nvmefc_fcp_req *fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) struct fcloop_fcpreq *tfcp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) spinlock_t inilock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) static inline struct fcloop_lsreq *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) static inline struct fcloop_fcpreq *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) fcloop_create_queue(struct nvme_fc_local_port *localport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) unsigned int qidx, u16 qsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) void **handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) *handle = localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) fcloop_delete_queue(struct nvme_fc_local_port *localport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) unsigned int idx, void *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) fcloop_rport_lsrqst_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) struct fcloop_rport *rport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) container_of(work, struct fcloop_rport, ls_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) struct fcloop_lsreq *tls_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) spin_lock(&rport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) tls_req = list_first_entry_or_null(&rport->ls_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) struct fcloop_lsreq, ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) if (!tls_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) list_del(&tls_req->ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) spin_unlock(&rport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) * callee may free memory containing tls_req.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * do not reference lsreq after this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) spin_lock(&rport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) spin_unlock(&rport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) struct nvme_fc_remote_port *remoteport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) struct nvmefc_ls_req *lsreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) struct fcloop_lsreq *tls_req = lsreq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) struct fcloop_rport *rport = remoteport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) tls_req->lsreq = lsreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) INIT_LIST_HEAD(&tls_req->ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) if (!rport->targetport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) tls_req->status = -ECONNREFUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) spin_lock(&rport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) list_add_tail(&rport->ls_list, &tls_req->ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) spin_unlock(&rport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) schedule_work(&rport->ls_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) tls_req->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) &tls_req->ls_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) lsreq->rqstaddr, lsreq->rqstlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) struct nvmefc_ls_rsp *lsrsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) struct nvmefc_ls_req *lsreq = tls_req->lsreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) struct fcloop_tport *tport = targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) struct nvme_fc_remote_port *remoteport = tport->remoteport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) struct fcloop_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) memcpy(lsreq->rspaddr, lsrsp->rspbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) ((lsreq->rsplen < lsrsp->rsplen) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) lsreq->rsplen : lsrsp->rsplen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) lsrsp->done(lsrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) if (remoteport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) rport = remoteport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) spin_lock(&rport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) list_add_tail(&rport->ls_list, &tls_req->ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) spin_unlock(&rport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) schedule_work(&rport->ls_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) fcloop_tport_lsrqst_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) struct fcloop_tport *tport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) container_of(work, struct fcloop_tport, ls_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) struct fcloop_lsreq *tls_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) spin_lock(&tport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) tls_req = list_first_entry_or_null(&tport->ls_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) struct fcloop_lsreq, ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) if (!tls_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) list_del(&tls_req->ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) spin_unlock(&tport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) * callee may free memory containing tls_req.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) * do not reference lsreq after this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) spin_lock(&tport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) spin_unlock(&tport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) struct nvmefc_ls_req *lsreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) struct fcloop_lsreq *tls_req = lsreq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) struct fcloop_tport *tport = targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * hosthandle should be the dst.rport value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * hosthandle ignored as fcloop currently is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * 1:1 tgtport vs remoteport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) tls_req->lsreq = lsreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) INIT_LIST_HEAD(&tls_req->ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) if (!tport->remoteport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) tls_req->status = -ECONNREFUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) spin_lock(&tport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) list_add_tail(&tport->ls_list, &tls_req->ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) spin_unlock(&tport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) schedule_work(&tport->ls_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) tls_req->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) lsreq->rqstaddr, lsreq->rqstlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) struct nvme_fc_remote_port *remoteport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) struct nvmefc_ls_rsp *lsrsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) struct nvmefc_ls_req *lsreq = tls_req->lsreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) struct fcloop_rport *rport = remoteport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) struct nvmet_fc_target_port *targetport = rport->targetport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) struct fcloop_tport *tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) memcpy(lsreq->rspaddr, lsrsp->rspbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) ((lsreq->rsplen < lsrsp->rsplen) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) lsreq->rsplen : lsrsp->rsplen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) lsrsp->done(lsrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) if (targetport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) tport = targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) spin_lock(&tport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) list_add_tail(&tport->ls_list, &tls_req->ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) spin_unlock(&tport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) schedule_work(&tport->ls_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) fcloop_t2h_host_release(void *hosthandle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) /* host handle ignored for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) * Simulate reception of RSCN and converting it to a initiator transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) * call to rescan a remote port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) fcloop_tgt_rscn_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) struct fcloop_rscn *tgt_rscn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) container_of(work, struct fcloop_rscn, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) struct fcloop_tport *tport = tgt_rscn->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) if (tport->remoteport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) nvme_fc_rescan_remoteport(tport->remoteport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) kfree(tgt_rscn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) struct fcloop_rscn *tgt_rscn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (!tgt_rscn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) tgt_rscn->tport = tgtport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) schedule_work(&tgt_rscn->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) fcloop_tfcp_req_free(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) struct fcloop_fcpreq *tfcp_req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) container_of(ref, struct fcloop_fcpreq, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) kfree(tfcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) return kref_get_unless_zero(&tfcp_req->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) struct fcloop_fcpreq *tfcp_req, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) struct fcloop_ini_fcpreq *inireq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (fcpreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) inireq = fcpreq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) spin_lock(&inireq->inilock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) inireq->tfcp_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) spin_unlock(&inireq->inilock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) fcpreq->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) fcpreq->done(fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) /* release original io reference on tgt struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) fcloop_tfcp_req_put(tfcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) fcloop_fcp_recv_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) struct fcloop_fcpreq *tfcp_req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) bool aborted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) spin_lock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) switch (tfcp_req->inistate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) case INI_IO_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) tfcp_req->inistate = INI_IO_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) case INI_IO_ABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) aborted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (unlikely(aborted))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) ret = -ECANCELED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) &tfcp_req->tgt_fcp_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) fcpreq->cmdaddr, fcpreq->cmdlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) fcloop_call_host_done(fcpreq, tfcp_req, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) fcloop_fcp_abort_recv_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) struct fcloop_fcpreq *tfcp_req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) container_of(work, struct fcloop_fcpreq, abort_rcv_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) struct nvmefc_fcp_req *fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) bool completed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) spin_lock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) fcpreq = tfcp_req->fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) switch (tfcp_req->inistate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) case INI_IO_ABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) case INI_IO_COMPLETED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) completed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (unlikely(completed)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) /* remove reference taken in original abort downcall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) fcloop_tfcp_req_put(tfcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) if (tfcp_req->tport->targetport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) &tfcp_req->tgt_fcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) spin_lock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) tfcp_req->fcpreq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) /* call_host_done releases reference for abort downcall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * FCP IO operation done by target completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) * call back up initiator "done" flows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) fcloop_tgt_fcprqst_done_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) struct fcloop_fcpreq *tfcp_req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) container_of(work, struct fcloop_fcpreq, tio_done_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct nvmefc_fcp_req *fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) spin_lock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) fcpreq = tfcp_req->fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) tfcp_req->inistate = INI_IO_COMPLETED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) fcloop_fcp_req(struct nvme_fc_local_port *localport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) struct nvme_fc_remote_port *remoteport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) void *hw_queue_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) struct nvmefc_fcp_req *fcpreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) struct fcloop_rport *rport = remoteport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) struct fcloop_ini_fcpreq *inireq = fcpreq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) struct fcloop_fcpreq *tfcp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (!rport->targetport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) return -ECONNREFUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) if (!tfcp_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) inireq->fcpreq = fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) inireq->tfcp_req = tfcp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) spin_lock_init(&inireq->inilock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) tfcp_req->fcpreq = fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) tfcp_req->tport = rport->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) tfcp_req->inistate = INI_IO_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) spin_lock_init(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) kref_init(&tfcp_req->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) schedule_work(&tfcp_req->fcp_rcv_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) struct scatterlist *io_sg, u32 offset, u32 length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) void *data_p, *io_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) u32 data_len, io_len, tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) io_p = sg_virt(io_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) io_len = io_sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) for ( ; offset; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) tlen = min_t(u32, offset, io_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) offset -= tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) io_len -= tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) if (!io_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) io_sg = sg_next(io_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) io_p = sg_virt(io_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) io_len = io_sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) io_p += tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) data_p = sg_virt(data_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) data_len = data_sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) for ( ; length; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) tlen = min_t(u32, io_len, data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) tlen = min_t(u32, tlen, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (op == NVMET_FCOP_WRITEDATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) memcpy(data_p, io_p, tlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) memcpy(io_p, data_p, tlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) length -= tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) io_len -= tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if ((!io_len) && (length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) io_sg = sg_next(io_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) io_p = sg_virt(io_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) io_len = io_sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) io_p += tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) data_len -= tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if ((!data_len) && (length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) data_sg = sg_next(data_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) data_p = sg_virt(data_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) data_len = data_sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) data_p += tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) struct nvmefc_tgt_fcp_req *tgt_fcpreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct nvmefc_fcp_req *fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) u32 rsplen = 0, xfrlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) int fcp_err = 0, active, aborted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) u8 op = tgt_fcpreq->op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) spin_lock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) fcpreq = tfcp_req->fcpreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) active = tfcp_req->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) aborted = tfcp_req->aborted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) tfcp_req->active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (unlikely(active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /* illegal - call while i/o active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (unlikely(aborted)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) /* target transport has aborted i/o prior */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) spin_lock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) tfcp_req->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) tgt_fcpreq->transferred_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) tgt_fcpreq->fcp_error = -ECANCELED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) tgt_fcpreq->done(tgt_fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * if fcpreq is NULL, the I/O has been aborted (from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * initiator side). For the target side, act as if all is well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * but don't actually move data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) switch (op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) case NVMET_FCOP_WRITEDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) xfrlen = tgt_fcpreq->transfer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (fcpreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) fcpreq->first_sgl, tgt_fcpreq->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) xfrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) fcpreq->transferred_length += xfrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) case NVMET_FCOP_READDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) case NVMET_FCOP_READDATA_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) xfrlen = tgt_fcpreq->transfer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (fcpreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) fcpreq->first_sgl, tgt_fcpreq->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) xfrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) fcpreq->transferred_length += xfrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (op == NVMET_FCOP_READDATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Fall-Thru to RSP handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) case NVMET_FCOP_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (fcpreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) fcpreq->rsplen : tgt_fcpreq->rsplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (rsplen < tgt_fcpreq->rsplen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) fcp_err = -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) fcpreq->rcv_rsplen = rsplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) fcpreq->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) tfcp_req->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) fcp_err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) spin_lock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) tfcp_req->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) tgt_fcpreq->transferred_length = xfrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) tgt_fcpreq->fcp_error = fcp_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) tgt_fcpreq->done(tgt_fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct nvmefc_tgt_fcp_req *tgt_fcpreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * mark aborted only in case there were 2 threads in transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * (one doing io, other doing abort) and only kills ops posted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * after the abort request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) spin_lock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) tfcp_req->aborted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) tfcp_req->status = NVME_SC_INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * nothing more to do. If io wasn't active, the transport should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * immediately call the req_release. If it was active, the op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * will complete, and the lldd should call req_release.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct nvmefc_tgt_fcp_req *tgt_fcpreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) schedule_work(&tfcp_req->tio_done_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct nvme_fc_remote_port *remoteport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct nvmefc_ls_req *lsreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) void *hosthandle, struct nvmefc_ls_req *lsreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) fcloop_fcp_abort(struct nvme_fc_local_port *localport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct nvme_fc_remote_port *remoteport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) void *hw_queue_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct nvmefc_fcp_req *fcpreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct fcloop_ini_fcpreq *inireq = fcpreq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct fcloop_fcpreq *tfcp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) bool abortio = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) spin_lock(&inireq->inilock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) tfcp_req = inireq->tfcp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (tfcp_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) fcloop_tfcp_req_get(tfcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) spin_unlock(&inireq->inilock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (!tfcp_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* abort has already been called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /* break initiator/target relationship for io */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) spin_lock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) switch (tfcp_req->inistate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) case INI_IO_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) case INI_IO_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) tfcp_req->inistate = INI_IO_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) case INI_IO_COMPLETED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) abortio = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) spin_unlock_irq(&tfcp_req->reqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (abortio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* leave the reference while the work item is scheduled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * as the io has already had the done callback made,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * nothing more to do. So release the reference taken above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) fcloop_tfcp_req_put(tfcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) fcloop_nport_free(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct fcloop_nport *nport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) container_of(ref, struct fcloop_nport, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) spin_lock_irqsave(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) list_del(&nport->nport_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) spin_unlock_irqrestore(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) kfree(nport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
/* Drop a reference on @nport; frees and unlinks it on the last put. */
static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
/*
 * Take a reference on @nport. Returns non-zero on success, 0 if the
 * refcount had already dropped to zero (nport is being torn down).
 */
static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) fcloop_localport_delete(struct nvme_fc_local_port *localport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct fcloop_lport_priv *lport_priv = localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct fcloop_lport *lport = lport_priv->lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /* release any threads waiting for the unreg to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) complete(&lport->unreg_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
/*
 * nvme-fc host transport callback: the remote port is gone.  Flush any
 * pending LS request work, then drop the nport reference this rport
 * held (taken when the remote port was created).
 */
static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
/*
 * nvmet-fc target transport callback: the target port is gone.  Flush
 * any pending LS request work, then drop the nport reference this
 * tport held (taken when the target port was created).
 */
static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	flush_work(&tport->ls_work);
	fcloop_nport_put(tport->nport);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
/* fabricated capability limits advertised through the port templates below */
#define FCLOOP_HW_QUEUES 4
#define FCLOOP_SGL_SEGS 256
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
/*
 * nvme-fc host-side port template: routes the host's LS and FCP
 * operations into the loopback handlers instead of real FC hardware.
 */
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue	= fcloop_create_queue,
	.delete_queue	= fcloop_delete_queue,
	.ls_req		= fcloop_h2t_ls_req,
	.fcp_io		= fcloop_fcp_req,
	.ls_abort	= fcloop_h2t_ls_abort,
	.fcp_abort	= fcloop_fcp_abort,
	.xmt_ls_rsp	= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues	= FCLOOP_HW_QUEUES,
	.max_sgl_segments = FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
	.dma_boundary = FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz	= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz	= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
/*
 * nvmet-fc target-side port template: routes the target's LS and FCP
 * operations into the loopback handlers instead of real FC hardware.
 */
static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct nvme_fc_port_info pinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct fcloop_ctrl_options *opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct nvme_fc_local_port *localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct fcloop_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct fcloop_lport_priv *lport_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) lport = kzalloc(sizeof(*lport), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (!lport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) opts = kzalloc(sizeof(*opts), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (!opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) goto out_free_lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) ret = fcloop_parse_options(opts, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) goto out_free_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /* everything there ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) goto out_free_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) memset(&pinfo, 0, sizeof(pinfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) pinfo.node_name = opts->wwnn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) pinfo.port_name = opts->wwpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) pinfo.port_role = opts->roles;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) pinfo.port_id = opts->fcaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) lport_priv = localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) lport_priv->lport = lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) lport->localport = localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) INIT_LIST_HEAD(&lport->lport_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) spin_lock_irqsave(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) list_add_tail(&lport->lport_list, &fcloop_lports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) spin_unlock_irqrestore(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) out_free_opts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) kfree(opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) out_free_lport:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /* free only if we're going to fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) kfree(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
/*
 * Remove @lport from the global lport list.  Caller must hold
 * fcloop_lock (see fcloop_delete_local_port()).
 */
static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
/*
 * Unregister @lport's local port from the nvme-fc transport and block
 * until fcloop_localport_delete() signals completion, then free the
 * lport.  The completion must be initialized before the unregister
 * call, since the delete callback may fire immediately.
 */
static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct fcloop_lport *tlport, *lport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) u64 nodename, portname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) spin_lock_irqsave(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) list_for_each_entry(tlport, &fcloop_lports, lport_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (tlport->localport->node_name == nodename &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) tlport->localport->port_name == portname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) lport = tlport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) __unlink_local_port(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) spin_unlock_irqrestore(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (!lport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ret = __wait_localport_unreg(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
/*
 * Parse the option string in @buf and return an fcloop_nport for a new
 * remote port (@remoteport true) or target port (@remoteport false).
 * If an nport with the same wwnn/wwpn already exists and its matching
 * slot (rport/tport) is free, that nport is reused with a reference
 * taken; otherwise a fresh nport is allocated (refcount 1) and added
 * to the global list.  Returns NULL on any failure; the caller owns
 * the returned reference.
 */
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* all options required for this port type present? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		/* the new nport must not clash with an existing lport name */
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		/* remember the lport named by lpwwnn/lpwwpn, if any */
		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		/* a remote port must be bound to an existing local port */
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			/* at most one rport and one tport per nport */
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			/*
			 * NOTE(review): return value unchecked — a
			 * concurrent final put could race this get;
			 * confirm whether fcloop_lock excludes that.
			 */
			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			/* reuse the existing nport; discard the new one */
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	/* NULL unless we matched (and referenced) an existing nport */
	return nport;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct nvme_fc_remote_port *remoteport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct fcloop_nport *nport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct fcloop_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct nvme_fc_port_info pinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) nport = fcloop_alloc_nport(buf, count, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (!nport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) memset(&pinfo, 0, sizeof(pinfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) pinfo.node_name = nport->node_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) pinfo.port_name = nport->port_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) pinfo.port_role = nport->port_role;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) pinfo.port_id = nport->port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) ret = nvme_fc_register_remoteport(nport->lport->localport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) &pinfo, &remoteport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (ret || !remoteport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) fcloop_nport_put(nport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) rport = remoteport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) rport->remoteport = remoteport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (nport->tport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) nport->tport->remoteport = remoteport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) nport->tport->lport = nport->lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) rport->nport = nport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) rport->lport = nport->lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) nport->rport = rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) spin_lock_init(&rport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) INIT_LIST_HEAD(&rport->ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
/*
 * Detach the rport from @nport (and clear the tport's back-pointer to
 * the remote port, if one exists).  Returns the detached rport, or
 * NULL if none was attached.  Caller must hold fcloop_lock (see
 * fcloop_delete_remote_port()).
 */
static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
/*
 * Unregister @rport's remote port from the nvme-fc transport.
 * Returns -EALREADY if the rport was already gone.
 * (@nport is currently unused here; kept for symmetry with
 * __targetport_unreg().)
 */
static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) struct fcloop_nport *nport = NULL, *tmpport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static struct fcloop_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) u64 nodename, portname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) spin_lock_irqsave(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (tmpport->node_name == nodename &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) tmpport->port_name == portname && tmpport->rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) nport = tmpport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) rport = __unlink_remote_port(nport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) spin_unlock_irqrestore(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (!nport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) ret = __remoteport_unreg(nport, rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) struct nvmet_fc_target_port *targetport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct fcloop_nport *nport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct fcloop_tport *tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct nvmet_fc_port_info tinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) nport = fcloop_alloc_nport(buf, count, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (!nport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) tinfo.node_name = nport->node_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) tinfo.port_name = nport->port_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) tinfo.port_id = nport->port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) &targetport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) fcloop_nport_put(nport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /* success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) tport = targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) tport->targetport = targetport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (nport->rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) nport->rport->targetport = targetport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) tport->nport = nport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) tport->lport = nport->lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) nport->tport = tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) spin_lock_init(&tport->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) INIT_LIST_HEAD(&tport->ls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
/*
 * Detach the tport from @nport (and clear the rport's back-pointer to
 * the target port, if one exists).  Returns the detached tport, or
 * NULL if none was attached.  Caller must hold fcloop_lock (see
 * fcloop_delete_target_port()).
 */
static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
/*
 * Unregister @tport's target port from the nvmet-fc transport.
 * Returns -EALREADY if the tport was already gone.
 * (@nport is currently unused here; kept for symmetry with
 * __remoteport_unreg().)
 */
static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct fcloop_nport *nport = NULL, *tmpport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) struct fcloop_tport *tport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) u64 nodename, portname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) spin_lock_irqsave(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (tmpport->node_name == nodename &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) tmpport->port_name == portname && tmpport->tport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) nport = tmpport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) tport = __unlink_target_port(nport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) spin_unlock_irqrestore(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (!nport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) ret = __targetport_unreg(nport, tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static struct attribute *fcloop_dev_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) &dev_attr_add_local_port.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) &dev_attr_del_local_port.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) &dev_attr_add_remote_port.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) &dev_attr_del_remote_port.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) &dev_attr_add_target_port.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) &dev_attr_del_target_port.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) static struct attribute_group fclopp_dev_attrs_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) .attrs = fcloop_dev_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static const struct attribute_group *fcloop_dev_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) &fclopp_dev_attrs_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) static struct class *fcloop_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) static struct device *fcloop_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static int __init fcloop_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) fcloop_class = class_create(THIS_MODULE, "fcloop");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (IS_ERR(fcloop_class)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) pr_err("couldn't register class fcloop\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) ret = PTR_ERR(fcloop_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) fcloop_device = device_create_with_groups(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) fcloop_class, NULL, MKDEV(0, 0), NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) fcloop_dev_attr_groups, "ctl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (IS_ERR(fcloop_device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) pr_err("couldn't create ctl device!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) ret = PTR_ERR(fcloop_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) goto out_destroy_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) get_device(fcloop_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) out_destroy_class:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) class_destroy(fcloop_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static void __exit fcloop_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) struct fcloop_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct fcloop_nport *nport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) struct fcloop_tport *tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) struct fcloop_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) spin_lock_irqsave(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) nport = list_first_entry_or_null(&fcloop_nports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) typeof(*nport), nport_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (!nport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) tport = __unlink_target_port(nport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) rport = __unlink_remote_port(nport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) spin_unlock_irqrestore(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) ret = __targetport_unreg(nport, tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) pr_warn("%s: Failed deleting target port\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) ret = __remoteport_unreg(nport, rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) pr_warn("%s: Failed deleting remote port\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) spin_lock_irqsave(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) lport = list_first_entry_or_null(&fcloop_lports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) typeof(*lport), lport_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (!lport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) __unlink_local_port(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) spin_unlock_irqrestore(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) ret = __wait_localport_unreg(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) pr_warn("%s: Failed deleting local port\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) spin_lock_irqsave(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) spin_unlock_irqrestore(&fcloop_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) put_device(fcloop_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) device_destroy(fcloop_class, MKDEV(0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) class_destroy(fcloop_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) module_init(fcloop_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) module_exit(fcloop_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) MODULE_LICENSE("GPL v2");