// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

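/*
 * Compute the host's requested transfer length for Get Log Page from the
 * NUMDU/NUMDL fields (number of dwords, 0's based).  Illustrative example,
 * not taken from this file: a 4096-byte request is encoded as NUMD = 1023,
 * i.e. NUMDL = 0x3ff and NUMDU = 0, so this helper returns
 * (1023 + 1) * 4 = 4096 bytes.
 */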
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

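/*
 * Expected data transfer length for a Get Features command.  Of the
 * features implemented here only Host Identifier carries a data buffer;
 * everything else returns its result in the completion dword.
 */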
static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

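/*
 * Error Information log page.  ctrl->slots[] is used as a circular buffer
 * indexed by err_counter, so we start at the most recently filled slot and
 * walk backwards, returning the newest entries first as the spec expects.
 */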
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

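/*
 * Per-namespace SMART / Health data, synthesized from the block layer's
 * partition statistics.  Data units are reported in the spec's unit of
 * 1000 512-byte sectors, hence the DIV_ROUND_UP(..., 1000) below.
 */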
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[READ]), 1000);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

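/*
 * Controller-wide SMART / Health data: the same block device statistics as
 * above, summed over every namespace in the subsystem.
 */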
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

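/*
 * Commands Supported and Effects log page.  Bit 0 (CSUPP) is set for every
 * admin and I/O opcode this target implements; no other effects bits are
 * reported.
 */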
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

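/*
 * Changed Namespace List log page.  If more namespaces changed than the
 * list can hold, nr_changed_ns is saturated to U32_MAX and a single entry
 * (presumably set to NVME_NSID_ALL where the list is maintained) is
 * returned instead.  Reading the log clears the list and re-arms the
 * namespace attribute AEN.
 */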
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

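/*
 * Fill in one ANA group descriptor.  If the host set the RGO bit (return
 * groups only) we skip the per-namespace NSID list and report nnsids == 0.
 */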
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
			req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

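/*
 * Model number for Identify Controller.  The configurable model string is
 * published via RCU, so dereference it under rcu_read_lock() and fall back
 * to the default when none has been set.
 */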
static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
				      struct nvmet_subsys *subsys)
{
	const char *model = NVMET_DEFAULT_CTRL_MODEL;
	struct nvmet_subsys_model *subsys_model;

	rcu_read_lock();
	subsys_model = rcu_dereference(subsys->model);
	if (subsys_model)
		model = subsys_model->number;
	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
	rcu_read_unlock();
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	nvmet_id_set_model_number(id, ctrl->subsys);
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

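	/*
	 * SQES/CQES: each nibble is log2 of an entry size, the low nibble the
	 * required (minimum) size and the high nibble the maximum.  0x66 and
	 * 0x44 therefore advertise fixed 64-byte SQEs and 16-byte CQEs.
	 */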
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

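/*
 * Identify Namespace.  The broadcast NSID is rejected here, and for a
 * namespace that is not currently active we return an all-zeroed structure
 * rather than an error.
 */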
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
	if (!req->ns) {
		status = 0;
		goto done;
	}

	nvmet_ns_revalidate(req->ns);

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= (1 << 0);
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

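/*
 * Active Namespace ID list: every active NSID greater than the NSID in the
 * command, in ascending order, up to one page worth of entries.
 */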
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

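/*
 * Emit one Namespace Identification Descriptor (header plus payload) into
 * the response buffer at *off and advance the offset past it.
 */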
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

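/*
 * Namespace Identification Descriptor list: one descriptor each for the
 * namespace UUID and NGUID when they are set, with the remainder of the
 * buffer zero-filled to terminate the list.
 */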
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_debug("unhandled identify cns %d on qid %d\n",
			req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately, reporting that the command to
 * abort wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

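/*
 * Set Features, Namespace Write Protect.  When enabling write protection we
 * flush the backing store first and only keep the namespace read-only if
 * the flush succeeded; a successful state change is signalled as a
 * namespace attribute change.
 */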
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

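/*
 * Keep Alive Timer feature.  CDW11 carries the timeout in milliseconds; we
 * store it in seconds (rounded up) and restart the keep-alive timer with
 * the new value.
 */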
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

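/*
 * Asynchronous Event Configuration feature.  Only bits within the supplied
 * mask may be enabled; anything else is rejected as an invalid field.
 */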
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (val32 & ~mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) req->error_loc = offsetof(struct nvme_common_command, cdw11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) nvmet_set_result(req, val32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

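/*
 * Get Features, Write Protect: resolve the namespace from NSID and report its
 * current write-protection state in the completion result.  The subsystem
 * lock keeps the answer consistent with a concurrent Set Features call that
 * changes ns->readonly.
 */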
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}
	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

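/*
 * Keep Alive Timeout is reported in milliseconds; ctrl->kato is kept in
 * seconds.
 */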
void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

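/* Report the currently enabled Asynchronous Event Configuration bits. */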
void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

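/*
 * Get Features: most feature values are returned in the completion result
 * via nvmet_set_result(); Host Identifier is the exception and is copied
 * out through the command's data buffer instead.
 */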
void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
			sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

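/*
 * Async Event Request: park the command until an event is ready.  At most
 * NVMET_ASYNC_EVENTS AERs may be outstanding per controller; any excess is
 * failed immediately.  The work item is kicked in case an event was queued
 * while no AER slot was available.
 */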
void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

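/*
 * Keep Alive: push the keep-alive timer out by another KATO interval.  If
 * the host stops sending these, the delayed work eventually fires and the
 * target treats the controller as dead.
 */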
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

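/*
 * Admin command dispatch: fabrics and discovery commands go to their own
 * parsers first, then controller state is validated, then a passthru
 * controller (if configured) may claim the command before the emulated
 * admin opcodes below are matched.
 */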
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_cmd(req);
	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	if (nvmet_req_passthru_ctrl(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}

	pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}