// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

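/*
 * Per-command context: the host-side nvme_request and the target-side
 * nvmet_req share this structure, so a command submitted through the
 * host block layer is executed directly by the local nvmet core.
 */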
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

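/*
 * Queue 0 is the admin queue and maps to the admin tag set; I/O queue i
 * maps to hardware context i - 1 of the shared I/O tag set.
 */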
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

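/*
 * Called by the nvmet core when a command has completed on the target
 * side; match the CQE back to the originating host request (or AEN) and
 * complete it.
 */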
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"got bad command_id %#x on queue %d\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
			nvme_loop_complete_rq(rq);
	}
}

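/*
 * Work item that runs the target-side command handler; queued from
 * nvme_loop_queue_rq() and nvme_loop_submit_async_event() so execution
 * happens in process context rather than in the submission path.
 */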
static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

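/*
 * Host-side .queue_rq: translate the block request into an NVMe command,
 * hand it to the colocated nvmet core as a target request, and defer the
 * actual execution to a work item.
 */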
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

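/*
 * AENs are not backed by a struct request; build the async event command
 * by hand with the reserved command_id and feed it straight to the
 * target side.
 */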
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

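/*
 * blk-mq .init_request: requests from the I/O tag set belong to queue
 * hctx_idx + 1, admin requests always to queue 0.
 */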
static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	nvme_req(req)->ctrl = &ctrl->ctrl;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

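/*
 * Tear down the admin queue: destroy the target-side SQ first, then the
 * host-side admin and fabrics request queues and the admin tag set.
 */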
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
	ctrl->ctrl.queue_count = 1;
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

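/*
 * Bring up the admin queue: set up the admin tag set and request queues,
 * connect the fabrics admin queue, enable the controller and read the
 * identify data.
 */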
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
		error = PTR_ERR(ctrl->ctrl.fabrics_q);
		goto out_free_tagset;
	}

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_cleanup_fabrics_q;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

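/*
 * Quiesce and tear down all queues: cancel outstanding I/O and admin
 * requests, shut the controller down cleanly if it is still live, and
 * destroy the target-side queues.
 */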
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		nvme_loop_destroy_io_queues(ctrl);
	}

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

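/*
 * Controller reset: tear everything down and rebuild the admin and I/O
 * queues; if any step fails the controller is uninitialized and removed.
 */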
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
			/* state change failure for non-deleted ctrl? */
			WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

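/*
 * Pick the target port to attach to: either the one whose traddr matches
 * the connect options, or the first registered loop port if no traddr
 * was given.
 */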
static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

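/*
 * Fabrics .create_ctrl entry point: allocate the controller, set up the
 * admin queue and optional I/O queues, then transition it to LIVE and
 * start it.
 */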
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret) {
		kfree(ctrl);
		goto out;
	}

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		WARN_ON_ONCE(1);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
out:
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port.  This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */