// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"

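/*
 * Property Set (fabrics): only the 4-byte Controller Configuration (CC)
 * register is writable.  ATTRIB bit 0 set selects an 8-byte property,
 * which no supported register uses, so it is rejected up front.
 */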
static void nvmet_execute_prop_set(struct nvmet_req *req)
{
	u64 val = le64_to_cpu(req->cmd->prop_set.value);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (req->cmd->prop_set.attrib & 1) {
		req->error_loc =
			offsetof(struct nvmf_property_set_command, attrib);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	switch (le32_to_cpu(req->cmd->prop_set.offset)) {
	case NVME_REG_CC:
		nvmet_update_cc(req->sq->ctrl, val);
		break;
	default:
		req->error_loc =
			offsetof(struct nvmf_property_set_command, offset);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}
out:
	nvmet_req_complete(req, status);
}

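/*
 * Property Get (fabrics): ATTRIB bit 0 set selects an 8-byte read, where
 * only CAP is supported; a clear bit selects a 4-byte read of VS, CC or
 * CSTS.  The value is returned in the 64-bit completion result.
 */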
static void nvmet_execute_prop_get(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;
	u64 val = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (req->cmd->prop_get.attrib & 1) {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_CAP:
			val = ctrl->cap;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	} else {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_VS:
			val = ctrl->subsys->ver;
			break;
		case NVME_REG_CC:
			val = ctrl->cc;
			break;
		case NVME_REG_CSTS:
			val = ctrl->csts;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	}

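	/*
	 * error_loc only matters when status is non-zero: blame the offset
	 * for a failed 8-byte read, otherwise the attrib field.
	 */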
	if (status && req->cmd->prop_get.attrib & 1) {
		req->error_loc =
			offsetof(struct nvmf_property_get_command, offset);
	} else {
		req->error_loc =
			offsetof(struct nvmf_property_get_command, attrib);
	}

	req->cqe->result.u64 = cpu_to_le64(val);
	nvmet_req_complete(req, status);
}

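/*
 * Fabrics command capsule on an already-connected queue: only Property
 * Set and Property Get are handled here; Connect is parsed separately by
 * nvmet_parse_connect_cmd().
 */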
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->fabrics.fctype) {
	case nvme_fabrics_type_property_set:
		req->execute = nvmet_execute_prop_set;
		break;
	case nvme_fabrics_type_property_get:
		req->execute = nvmet_execute_prop_get;
		break;
	default:
		pr_err("received unknown capsule type 0x%x\n",
			cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return 0;
}

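/*
 * Bind the SQ/CQ pair described by a Connect command to the controller:
 * size the queues from the 0's-based SQSIZE value, honor the disable-SQ-
 * flow-control attribute, and let the transport set up the queue.
 */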
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	u16 qid = le16_to_cpu(c->qid);
	u16 sqsize = le16_to_cpu(c->sqsize);
	struct nvmet_ctrl *old;
	u16 ret;

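	/*
	 * Atomically claim this SQ for the controller; a second Connect on
	 * the same queue loses the race and is rejected as busy.
	 */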
	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
	if (old) {
		pr_warn("queue already connected!\n");
		req->error_loc = offsetof(struct nvmf_connect_command, opcode);
		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
	}
	if (!sqsize) {
		pr_warn("queue size zero!\n");
		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		goto err;
	}

	/* note: convert queue size from 0's-based value to 1's-based value */
	nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
	nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);

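	/*
	 * The host asked to disable SQ flow control: mark the queue and
	 * report an invalid (all-ones) SQ head pointer in completions.
	 */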
	if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
		req->sq->sqhd_disabled = true;
		req->cqe->sq_head = cpu_to_le16(0xffff);
	}

	if (ctrl->ops->install_queue) {
		ret = ctrl->ops->install_queue(req->sq);
		if (ret) {
			pr_err("failed to install queue %d cntlid %d ret %x\n",
				qid, ctrl->cntlid, ret);
			goto err;
		}
	}

	return 0;

err:
	req->sq->ctrl = NULL;
	return ret;
}

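/*
 * Connect on the admin queue (qid 0): validate the record format and the
 * dynamic controller id, allocate a new controller, and install the admin
 * queue.  The assigned cntlid is passed back in the completion result.
 */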
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
		return;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->cqe->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

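	/*
	 * Only the dynamic controller model is supported, so the host must
	 * request controller allocation with cntlid 0xffff.
	 */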
	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
		pr_warn("connect attempt for invalid controller ID %#x\n",
			d->cntlid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
		goto out;
	}

	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
				  le32_to_cpu(c->kato), &ctrl);
	if (status) {
		if (status == (NVME_SC_INVALID_FIELD | NVME_SC_DNR))
			req->error_loc =
				offsetof(struct nvme_common_command, opcode);
		goto out;
	}

	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;

	uuid_copy(&ctrl->hostid, &d->hostid);

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		nvmet_ctrl_put(ctrl);
		goto out;
	}

	pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
		ctrl->pi_support ? " T10-PI is enabled" : "");
	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
}

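/*
 * Connect on an I/O queue: look up the controller previously created via
 * the admin queue (by subsystem NQN, host NQN and cntlid), validate the
 * queue id, and install the queue.
 */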
static void nvmet_execute_io_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 qid = le16_to_cpu(c->qid);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
		return;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->cqe->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

	status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
				     le16_to_cpu(d->cntlid),
				     req, &ctrl);
	if (status)
		goto out;

	if (unlikely(qid > ctrl->subsys->max_qid)) {
		pr_warn("invalid queue id (%d)\n", qid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
		goto out_ctrl_put;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status)
		goto out_ctrl_put;

	/* pass back cntlid for successful completion */
	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);

	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
	return;

out_ctrl_put:
	nvmet_ctrl_put(ctrl);
	goto out;
}

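/*
 * First command on a newly accepted queue: only a fabrics Connect capsule
 * is valid here.  qid 0 selects the admin connect handler, anything else
 * the I/O connect handler.
 */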
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (!nvme_is_fabrics(cmd)) {
		pr_err("invalid command 0x%x on unconnected queue.\n",
			cmd->fabrics.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
		pr_err("invalid capsule type 0x%x on unconnected queue.\n",
			cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	if (cmd->connect.qid == 0)
		req->execute = nvmet_execute_admin_connect;
	else
		req->execute = nvmet_execute_io_connect;
	return 0;
}