/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
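
/*
 * Illustrative note: a PRP entry is an 8-byte address, so one controller
 * page as defined above can hold NVME_CTRL_PAGE_SIZE / 8 == 512 PRP
 * entries, independent of the kernel's own PAGE_SIZE.
 */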

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
	NVME_NS_LBA = 0,
	NVME_NS_LIGHTNVM = 1,
};

/*
 * List of workarounds for devices that required behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE = (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS = (1 << 1),

	/*
	 * The controller deterministically returns all zeroes on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES = (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST = (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM = (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR = (1 << 11),

	/*
	 * Use non-standard 128 bytes SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES = (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS = (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),

	/*
	 * The controller requires the command_id value to be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
};
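
/*
 * For PCIe devices the quirks above are attached through the driver_data of
 * the PCI ID table in the transport driver; a sketch of such an entry, using
 * the device referenced by NVME_QUIRK_DELAY_AMOUNT below (see
 * drivers/nvme/host/pci.c for the real table):
 *
 *	{ PCI_DEVICE(0x1c58, 0x0003),
 *		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 */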

/*
 * Common request structure for NVMe passthrough. All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command *cmd;
	union nvme_result result;
	u8 genctr;
	u8 retries;
	u8 flags;
	u16 status;
	struct nvme_ctrl *ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH	REQ_DRV

enum {
	NVME_REQ_CANCELLED = (1 << 0),
	NVME_REQ_USERCMD = (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;
	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
}
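
/*
 * Example: requests on queues without queuedata (e.g. the admin queue)
 * report qid 0, while namespace queues map their blk-mq hardware queue
 * index to the 1-based NVMe queue id, hence the "+ 1" above.
 */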

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing took place and
 *				before ns removal and the controller deletion
 *				progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int instance;
	struct device dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref ref;
	struct list_head entry;
	struct mutex lock;
	struct list_head ctrls;
	struct list_head nsheads;
	char subnqn[NVMF_NQN_SIZE];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u8 cmic;
	u16 vendor_id;
	u16 awupf;	/* 0's based awupf value. */
	struct ida ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8 eui64[8];
	u8 nguid[16];
	uuid_t uuid;
	u8 csi;
};

/*
 * Anchor structure for namespaces. There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it. For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head list;
	struct srcu_struct srcu;
	struct nvme_subsystem *subsys;
	unsigned ns_id;
	struct nvme_ns_ids ids;
	struct list_head entry;
	struct kref ref;
	bool shared;
	int instance;
	struct nvme_effects_log *effects;
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk *disk;
	struct bio_list requeue_list;
	spinlock_t requeue_lock;
	struct work_struct requeue_work;
	struct mutex lock;
	unsigned long flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu *current_path[];
#endif
};

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	u8 pi_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2

	struct nvme_fault_inject fault_inject;

};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
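
/*
 * Example: sizeof(struct t10_pi_tuple) is 8 bytes, so only namespaces
 * formatted with a protection type and exactly 8 bytes of per-block
 * metadata are treated as PI-capable by this helper.
 */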

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)		(gen & 0xf)
#define nvme_cid_install_genctr(gen)	(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)	((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)		(cid & 0xfff)

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}
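
/*
 * Worked example: with genctr == 0x5 and blk-mq tag == 0x02a, nvme_cid()
 * yields 0x502a; nvme_genctr_from_cid(0x502a) == 0x5 and
 * nvme_tag_from_cid(0x502a) == 0x02a recover the two halves.
 */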

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
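
/*
 * The value written to NVME_REG_NSSR above is the ASCII string "NVMe"
 * (0x4E 'N', 0x56 'V', 0x4D 'M', 0x65 'e'), the reset signature the
 * specification requires for an NVM subsystem reset.
 */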

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}
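
/*
 * Worked example: for a namespace with 4K logical blocks (lba_shift == 12)
 * the shift is 12 - SECTOR_SHIFT == 3, so 512B sector 8 maps to LBA 1 and
 * LBA 1 maps back to sector 8.
 */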

/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
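
/*
 * Worked example: a 4096-byte Get Log Page buffer is 1024 dwords, so
 * nvme_bytes_to_numd(4096) returns 1023 for the 0-based NUMD field.
 */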

static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}
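
/*
 * Example: bits 10:8 of the (phase-stripped) status hold the Status Code
 * Type, so the mask/compare above matches any status whose SCT is 0x3
 * ("path related status"), which covers the ANA codes checked by
 * nvme_is_ana_error() as well.
 */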

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so. If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}
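
/*
 * Typical transport usage, a sketch modelled on the PCIe driver's CQE
 * handler (see drivers/nvme/host/pci.c for the real caller):
 *
 *	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 *		nvme_pci_complete_rq(req);
 *
 * i.e. when the helper did not complete the request itself, the caller
 * finishes it directly without an extra indirect call.
 */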

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}

void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx);
void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}

static inline void nvme_trace_bio_complete(struct request *req,
	blk_status_t status)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
 * visible as devices and thus we cannot use the subsystem instance.
 */
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
		struct nvme_ctrl *ctrl, int *flags)
{
	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}

static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
static inline void nvme_trace_bio_complete(struct request *req,
	blk_status_t status)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & (1 << 3))
		dev_warn(ctrl->device,
			"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
int nvme_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);

blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
#define nvme_report_zones NULL

static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
		int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
		unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
void nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

#endif /* _NVME_H */