// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

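/*
 * Fill in the atomic write and optimal I/O fields of the Identify
 * Namespace data structure from the backing block device's queue
 * limits, so hosts can align I/O to the device's physical geometry.
 */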
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
	const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
	/* Number of logical blocks per physical block. */
	const u32 lpp = ql->physical_block_size / ql->logical_block_size;
	/* Logical blocks per physical block, 0's based. */
	const __le16 lpp0b = to0based(lpp);

	/*
	 * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
	 * NAWUPF, and NACWU are defined for this namespace and should be
	 * used by the host for this namespace instead of the AWUN, AWUPF,
	 * and ACWU fields in the Identify Controller data structure. If
	 * any of these fields are zero that means that the corresponding
	 * field from the identify controller data structure should be used.
	 */
	id->nsfeat |= 1 << 1;
	id->nawun = lpp0b;
	id->nawupf = lpp0b;
	id->nacwu = lpp0b;

	/*
	 * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
	 * NOWS are defined for this namespace and should be used by
	 * the host for I/O optimization.
	 */
	id->nsfeat |= 1 << 4;
	/* NPWG = Namespace Preferred Write Granularity. 0's based */
	id->npwg = lpp0b;
	/* NPWA = Namespace Preferred Write Alignment. 0's based */
	id->npwa = id->npwg;
	/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
	id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
	/* NPDA = Namespace Preferred Deallocate Alignment */
	id->npda = id->npdg;
	/* NOWS = Namespace Optimal Write Size */
	id->nows = to0based(ql->io_opt / ql->logical_block_size);
}

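/*
 * Detect a T10-PI integrity profile on the backing device. Only Type 1
 * and Type 3 CRC protection are supported; any other profile leaves
 * metadata disabled for the namespace.
 */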
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
	struct blk_integrity *bi = bdev_get_integrity(ns->bdev);

	if (bi) {
		ns->metadata_size = bi->tuple_size;
		if (bi->profile == &t10_pi_type1_crc)
			ns->pi_type = NVME_NS_DPS_PI_TYPE1;
		else if (bi->profile == &t10_pi_type3_crc)
			ns->pi_type = NVME_NS_DPS_PI_TYPE3;
		else
			/* Unsupported metadata type */
			ns->metadata_size = 0;
	}
}

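/*
 * Open the backing block device for a namespace and cache its size and
 * logical block size. -ENOTBLK is not logged as an error so that the
 * caller can fall back to a file-backed namespace.
 */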
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ns->pi_type = 0;
	ns->metadata_size = 0;
	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
		nvmet_bdev_ns_enable_integrity(ns);

	return 0;
}

void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
		ns->bdev = NULL;
	}
}

void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
	ns->size = i_size_read(ns->bdev->bd_inode);
}

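/*
 * Translate a block layer completion status into an NVMe status code,
 * recording the command field at fault and the failing LBA for the
 * error log.
 */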
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
	u16 status = NVME_SC_SUCCESS;

	if (likely(blk_sts == BLK_STS_OK))
		return status;
	/*
	 * Right now there exists an M : 1 mapping from block layer errors
	 * to NVMe status codes (see nvme_error_status()). For consistency,
	 * when we reverse map we use the most appropriate NVMe status code
	 * from the group of NVMe status codes used in nvme_error_status().
	 */
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, length);
		break;
	case BLK_STS_TARGET:
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		break;
	case BLK_STS_NOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case BLK_STS_MEDIUM:
		status = NVME_SC_ACCESS_DENIED;
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		break;
	case BLK_STS_IOERR:
	default:
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	}

	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
		break;
	case nvme_cmd_write_zeroes:
		req->error_slba =
			le64_to_cpu(req->cmd->write_zeroes.slba);
		break;
	default:
		req->error_slba = 0;
	}
	return status;
}

static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

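/*
 * Attach a bio integrity payload to @bio and fill it with protection
 * information pages from the request's metadata SG list, advancing
 * @miter as pages are consumed. Without CONFIG_BLK_DEV_INTEGRITY this
 * degenerates to a stub that fails any request carrying metadata.
 */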
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct block_device *bdev = req->ns->bdev;
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(bdev);
	if (unlikely(!bi)) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO,
		min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
		     (bi->interval_exp - SECTOR_SHIFT));

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {
		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (unlikely(rc != len)) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}
#else
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	return -EINVAL;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

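/*
 * Execute a read or write by mapping the request's SG list onto one or
 * more chained bios. All bios are submitted under a single plug; writes
 * honor the FUA bit, and peer-to-peer DMA pages suppress merging. When
 * the request carries protection information, an integrity payload is
 * attached to each bio before it is submitted.
 */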
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	struct blk_plug plug;
	sector_t sector;
	int op, i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int iter_flags;
	unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;

	if (!nvmet_check_transfer_len(req, total_len))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op |= REQ_FUA;
		iter_flags = SG_MITER_TO_SG;
	} else {
		op = REQ_OP_READ;
		iter_flags = SG_MITER_FROM_SG;
	}

	if (is_pci_p2pdma_page(sg_page(req->sg)))
		op |= REQ_NOMERGE;

	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	}
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = op;

	blk_start_plug(&plug);
	if (req->metadata_len)
		sg_miter_start(&prot_miter, req->metadata_sg,
			       req->metadata_sg_cnt, iter_flags);

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			if (req->metadata_len) {
				rc = nvmet_bdev_alloc_bip(req, bio,
							  &prot_miter);
				if (unlikely(rc)) {
					bio_io_error(bio);
					return;
				}
			}

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio->bi_opf = op;

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	if (req->metadata_len) {
		rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
		if (unlikely(rc)) {
			bio_io_error(bio);
			return;
		}
	}

	submit_bio(bio);
	blk_finish_plug(&plug);
}

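/*
 * Issue an asynchronous flush as an empty bio with REQ_PREFLUSH set;
 * completion is reported through nvmet_bio_done().
 */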
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}

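/*
 * Synchronous flush of the backing device, for callers outside the
 * normal asynchronous I/O completion path.
 */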
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

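/*
 * Queue a discard for a single DSM range, accumulating the work in
 * *bio so that all ranges of the command complete as one chain.
 * -EOPNOTSUPP is ignored: deallocate is only a hint, so a device that
 * cannot discard still completes the range successfully.
 */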
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
		struct nvme_dsm_range *range, struct bio **bio)
{
	struct nvmet_ns *ns = req->ns;
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			nvmet_lba_to_sect(ns, range->slba),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio);
	if (ret && ret != -EOPNOTSUPP) {
		req->error_slba = le64_to_cpu(range->slba);
		return errno_to_nvme_status(req, ret);
	}
	return NVME_SC_SUCCESS;
}

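/*
 * Walk the 0's based number of DSM ranges, copying each descriptor out
 * of the command SGL and turning it into a discard. If any bio was
 * built, the request completes from its end_io handler; otherwise the
 * request is completed inline with the accumulated status.
 */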
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status)
			bio_io_error(bio);
		else
			submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

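/*
 * Only the Attribute - Deallocate (AD) bit is acted upon. The Integral
 * Dataset for Read/Write hints are advisory, so they (and any unknown
 * attribute) complete successfully without doing any work.
 */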
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

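/*
 * Zero a range of the device. The NLB field is 0's based, so one is
 * added before converting logical blocks to 512-byte sectors. If the
 * zeroout helper returns without building a bio, the request is
 * completed inline with the translated errno.
 */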
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	sector_t sector;
	sector_t nr_sector;
	int ret;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0);
	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
	}
}

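/*
 * Validate the opcode of an I/O command against what the block device
 * backend supports and set up its execute handler. Reads and writes
 * additionally record the metadata transfer length when both the
 * controller and the namespace support protection information.
 */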
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
			req->metadata_len = nvmet_rw_metadata_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}