// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray to maintain one passthru subsystem per nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);

static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ctrl *id;
	int max_hw_sectors;
	int page_shift;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
	if (status)
		goto out_free;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/*
	 * The passthru NVMe driver may have a limit on the number of segments
	 * which depends on the host's memory fragmentation. To solve this,
	 * cap mdts at one page per supported segment.
	 */
	max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
				      pctrl->max_hw_sectors);

	/*
	 * nvmet_passthru_map_sg is limited to using a single bio, so limit
	 * the mdts based on BIO_MAX_PAGES as well.
	 */
	max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9),
				      max_hw_sectors);

	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

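	/*
	 * Purely illustrative example: with a 4 KiB minimum page size
	 * (page_shift = 12) and max_hw_sectors capped at 1024 512-byte
	 * sectors, mdts = ilog2(1024) + 9 - 12 = 7, i.e. the host sees a
	 * maximum transfer size of 2^7 * 4 KiB = 512 KiB.
	 */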
	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;

	id->acl = 3;
	/*
	 * We export the fabrics controller's aerl limit; update this once
	 * passthru-based aerl support is added.
	 */
	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* emulate kas as most PCIe ctrls don't support kas */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* don't support host memory buffer */
	id->hmpre = 0;
	id->hmmin = 0;

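	/*
	 * Cap the queue entry sizes: the upper nibble is the maximum and the
	 * lower nibble the minimum entry size, both as powers of two, so
	 * 0x66 means 64-byte SQEs and 0x44 means 16-byte CQEs.
	 */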
	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	/* don't support fused commands */
	id->fuses = 0;

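	/*
	 * Advertise basic SGL support (bit 0), keyed SGL data block
	 * descriptors (bit 2) when the transport uses them (e.g. RDMA), and
	 * in-capsule data (bit 20) when the port has an inline data size
	 * configured.
	 */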
	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	/*
	 * When the passthru controller is set up using the nvme-loop transport
	 * it will export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will
	 * fail in nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl()
	 * code path with a duplicate ctrl subsysnqn. In order to prevent that we
	 * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
	 */
	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

	/* use fabric id-ctrl values */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
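	/*
	 * Both sizes are expressed in 16-byte units; e.g. a port configured
	 * with 8 KiB of inline data gives ioccsz = (64 + 8192) / 16 = 516,
	 * while iorcsz is always sizeof(struct nvme_completion) / 16 = 1.
	 */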

	id->msdbd = ctrl->ops->msdbd;

	/* Support multipath connections with fabrics */
	id->cmic |= 1 << 1;

	/* Disable reservations, see nvmet_parse_passthru_io_cmd() */
	id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
	kfree(id);
	return status;
}

static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ns *id;
	int i;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
	if (status)
		goto out_free;

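	/*
	 * Zero out every LBA format that carries metadata and clear the
	 * extended-LBA bit in flbas: metadata cannot be passed through to the
	 * host (see the comment on id->mc below).
	 */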
	for (i = 0; i < (id->nlbaf + 1); i++)
		if (id->lbaf[i].ms)
			memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

	id->flbas = id->flbas & ~(1 << 4);

	/*
	 * Presently the NVMe-oF target code does not support sending
	 * metadata, so we must disable it here. This should be updated
	 * once the target starts supporting metadata.
	 */
	id->mc = 0;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
	kfree(id);
	return status;
}

static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
	struct request *rq = req->p.rq;
	u16 status;

	nvme_execute_passthru_rq(rq);

	status = nvme_req(rq)->status;
	if (status == NVME_SC_SUCCESS &&
	    req->cmd->common.opcode == nvme_admin_identify) {
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			nvmet_passthru_override_id_ctrl(req);
			break;
		case NVME_ID_CNS_NS:
			nvmet_passthru_override_id_ns(req);
			break;
		}
	}

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, status);
	blk_mq_free_request(rq);
}

static void nvmet_passthru_req_done(struct request *rq,
				    blk_status_t blk_status)
{
	struct nvmet_req *req = rq->end_io_data;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, nvme_req(rq)->status);
	blk_mq_free_request(rq);
}

static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
	struct scatterlist *sg;
	int op_flags = 0;
	struct bio *bio;
	int i, ret;

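	/*
	 * The data is mapped with a single bio, so the scatterlist must fit
	 * in one bio's worth of pages; this mirrors the mdts limit advertised
	 * in nvmet_passthru_override_id_ctrl().
	 */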
	if (req->sg_cnt > BIO_MAX_PAGES)
		return -EINVAL;

	if (req->cmd->common.opcode == nvme_cmd_flush)
		op_flags = REQ_FUA;
	else if (nvme_is_write(req->cmd))
		op_flags = REQ_SYNC | REQ_IDLE;

	bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
	bio->bi_end_io = bio_put;
	bio->bi_opf = req_op(rq) | op_flags;

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
				    sg->offset) < sg->length) {
			bio_put(bio);
			return -EINVAL;
		}
	}

	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		bio_put(bio);
		return ret;
	}

	return 0;
}

static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
	struct request_queue *q = ctrl->admin_q;
	struct nvme_ns *ns = NULL;
	struct request *rq = NULL;
	u32 effects;
	u16 status;
	int ret;

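	/*
	 * I/O commands (qid != 0) are issued on the queue of the namespace
	 * they address; admin commands go to the passthru controller's admin
	 * queue.
	 */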
	if (likely(req->sq->qid != 0)) {
		u32 nsid = le32_to_cpu(req->cmd->common.nsid);

		ns = nvme_find_get_ns(ctrl, nsid);
		if (unlikely(!ns)) {
			pr_err("failed to get passthru ns nsid:%u\n", nsid);
			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
			goto out;
		}

		q = ns->queue;
	}

	rq = nvme_alloc_request(q, req->cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq)) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}

	if (req->sg_cnt) {
		ret = nvmet_passthru_map_sg(req, rq);
		if (unlikely(ret)) {
			status = NVME_SC_INTERNAL;
			goto out_put_req;
		}
	}

	/*
	 * If the command we are about to execute has any effects, or if there
	 * is an end_req function, we need to run nvme_execute_passthru_rq()
	 * synchronously in a work item, since the end_req function and
	 * nvme_passthru_end() can't be called from the request done callback,
	 * which typically runs in interrupt context.
	 */
	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
	if (req->p.use_workqueue || effects) {
		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
		req->p.rq = rq;
		schedule_work(&req->p.work);
	} else {
		rq->end_io_data = req;
		blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0,
				      nvmet_passthru_req_done);
	}

	if (ns)
		nvme_put_ns(ns);

	return;

out_put_req:
	blk_mq_free_request(rq);
out_put_ns:
	if (ns)
		nvme_put_ns(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * We need to emulate the set host behaviour feature to ensure that the
 * behaviour requested by the target's host matches the behaviour already
 * requested by the device's host, and fail the command otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
	struct nvme_feat_host_behavior *host;
	u16 status = NVME_SC_INTERNAL;
	int ret;

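	/*
	 * host[0] receives the behaviour currently configured on the passthru
	 * controller, host[1] the behaviour requested by the remote host; the
	 * two must match for the command to succeed.
	 */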
	host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
	if (!host)
		goto out_complete_req;

	ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	if (ret)
		goto out_free_host;

	status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
	if (status)
		goto out_free_host;

	if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
		pr_warn("target host has requested different behaviour from the local host\n");
		status = NVME_SC_INTERNAL;
	}

out_free_host:
	kfree(host);
out_complete_req:
	nvmet_req_complete(req, status);
}

static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
	req->p.use_workqueue = false;
	req->execute = nvmet_passthru_execute_cmd;
	return NVME_SC_SUCCESS;
}

u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	switch (req->cmd->common.opcode) {
	case nvme_cmd_resv_register:
	case nvme_cmd_resv_report:
	case nvme_cmd_resv_acquire:
	case nvme_cmd_resv_release:
		/*
		 * Reservations cannot be supported properly because the
		 * underlying device has no way of differentiating different
		 * hosts that connect via fabrics. This could potentially be
		 * emulated in the future if regular targets grow support for
		 * this feature.
		 */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the list are
 * passed down to the controller. This function implements the allow list for
 * both get and set features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->features.fid)) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_LBA_RANGE:
	case NVME_FEAT_TEMP_THRESH:
	case NVME_FEAT_ERR_RECOVERY:
	case NVME_FEAT_VOLATILE_WC:
	case NVME_FEAT_WRITE_ATOMIC:
	case NVME_FEAT_AUTO_PST:
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_HCTM:
	case NVME_FEAT_NOPSC:
	case NVME_FEAT_RRL:
	case NVME_FEAT_PLM_CONFIG:
	case NVME_FEAT_PLM_WINDOW:
	case NVME_FEAT_HOST_BEHAVIOR:
	case NVME_FEAT_SANITIZE:
	case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
		return nvmet_setup_passthru_command(req);

	case NVME_FEAT_ASYNC_EVENT:
		/* There is no support for forwarding ASYNC events */
	case NVME_FEAT_IRQ_COALESCE:
	case NVME_FEAT_IRQ_CONFIG:
		/* The IRQ settings will not apply to the target controller */
	case NVME_FEAT_HOST_MEM_BUF:
		/*
		 * Any HMB that's set will not be passed through and will
		 * not work as expected
		 */
	case NVME_FEAT_SW_PROGRESS:
		/*
		 * The Pre-Boot Software Load Count doesn't make much
		 * sense for a target to export
		 */
	case NVME_FEAT_RESV_MASK:
	case NVME_FEAT_RESV_PERSIST:
		/* No reservations, see nvmet_parse_passthru_io_cmd() */
	default:
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	/*
	 * Passthru all vendor specific commands
	 */
	if (req->cmd->common.opcode >= nvme_admin_vendor_start)
		return nvmet_setup_passthru_command(req);

	switch (req->cmd->common.opcode) {
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return NVME_SC_SUCCESS;
	case nvme_admin_keep_alive:
		/*
		 * Most PCIe ctrls don't support the keep alive command, so we
		 * route keep alive to the non-passthru mode. Update this code
		 * once PCIe ctrls with keep alive support become available.
		 */
		req->execute = nvmet_execute_keep_alive;
		return NVME_SC_SUCCESS;
	case nvme_admin_set_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_set_features;
			return NVME_SC_SUCCESS;
		case NVME_FEAT_HOST_BEHAVIOR:
			req->execute = nvmet_passthru_set_host_behaviour;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_get_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_get_features;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_identify:
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_CTRL:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		case NVME_ID_CNS_NS:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_NS:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		default:
			return nvmet_setup_passthru_command(req);
		}
	case nvme_admin_get_log_page:
		return nvmet_setup_passthru_command(req);
	default:
		/* Reject commands not in the allowlist above */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
	struct nvme_ctrl *ctrl;
	struct file *file;
	int ret = -EINVAL;
	void *old;

	mutex_lock(&subsys->lock);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	if (subsys->nr_namespaces) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
		goto out_unlock;
	}

	file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_unlock;
	}

	ctrl = nvme_ctrl_from_file(file);
	if (!ctrl) {
		pr_err("failed to open nvme controller %s\n",
		       subsys->passthru_ctrl_path);

		goto out_put_file;
	}

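	/*
	 * The passthru_subsystems xarray ensures a given bare controller is
	 * claimed by at most one passthru subsystem at a time.
	 */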
	old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
			 subsys, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		goto out_put_file;
	}

	if (old)
		goto out_put_file;

	subsys->passthru_ctrl = ctrl;
	subsys->ver = ctrl->vs;

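	/*
	 * NVMe over Fabrics is based on NVMe 1.2.1 or later, so never
	 * advertise an older base version to connecting hosts.
	 */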
	if (subsys->ver < NVME_VS(1, 2, 1)) {
		pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
			NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
			NVME_TERTIARY(subsys->ver));
		subsys->ver = NVME_VS(1, 2, 1);
	}
	nvme_get_ctrl(ctrl);
	__module_get(subsys->passthru_ctrl->ops->module);
	ret = 0;

out_put_file:
	filp_close(file, NULL);
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	if (subsys->passthru_ctrl) {
		xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
		module_put(subsys->passthru_ctrl->ops->module);
		nvme_put_ctrl(subsys->passthru_ctrl);
	}
	subsys->passthru_ctrl = NULL;
	subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
}

void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
	kfree(subsys->passthru_ctrl_path);
}