Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

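The file below is the NVMe over Fabrics target discovery service (drivers/nvme/target/discovery.c in the kernel tree). It implements the well-known discovery subsystem that hosts query to learn which subsystems and referrals a port exports; a host typically reads this log with nvme-cli, e.g. "nvme discover -t tcp -a 192.168.1.10 -s 4420" (address and service ID here are illustrative).
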
// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

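/*
 * Discovery log page generation counter (GENCTR), bumped whenever the
 * discovery information changes; callers hold nvmet_config_sem.
 */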
static u64 nvmet_genctr;

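/*
 * Raise a "discovery log changed" AEN on @ctrl, unless the controller is
 * not on @port or has masked the event via Asynchronous Event Configuration.
 */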
static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

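/*
 * Notify discovery controllers that the discovery information for @port has
 * changed; when @subsys is given, only hosts allowed access to it are told.
 * Caller must hold nvmet_config_sem.
 */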
void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If transport can signal change, notify transport */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}

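/*
 * Raise the discovery-change AEN for @port on every discovery controller,
 * or only on controllers belonging to @host when one is given.
 */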
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}

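/*
 * Walk all ports exporting @subsys and raise discovery-change AENs on the
 * matching discovery controllers.
 */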
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

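/*
 * Link @port into @parent's referral list and publish the change; a port
 * that is already linked is left untouched.
 */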
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

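/*
 * Unlink @port from @parent's referral list and publish the change.
 */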
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

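/*
 * Fill discovery log entry @numrec in @hdr with the transport address of
 * @port and the NQN of the subsystem (or referral) being reported.
 */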
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" IPv4/IPv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
 * must not contain that "any" IP address. If the transport implements
 * .disc_traddr, use it: this callback sets the discovery traddr from the
 * req->port address in case the port in question listens on the "any"
 * IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

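/*
 * Count the discovery log entries visible to the requesting host: one per
 * subsystem on the port that allows the host, plus one per referral.
 */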
static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 0;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}

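/*
 * Get Log Page for the discovery log: build the full log under
 * nvmet_config_sem, copy back the window the host asked for (LPO/NUMD),
 * and clear the pending discovery-change AEN bit.
 */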
static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lpo);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/*
	 * Make sure we're allocating at least a buffer of response header
	 * size. If the host-provided data length is less than the header
	 * size, only the number of bytes requested by the host will be sent.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}

	hdr = buffer;
	list_for_each_entry(p, &req->port->subsystems, entry) {
		char traddr[NVMF_TRADDR_SIZE];

		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_set_disc_traddr(req, req->port, traddr);
		nvmet_format_discovery_entry(hdr, req->port,
				p->subsys->subsysnqn, traddr,
				NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
				NVME_DISC_SUBSYS_NAME,
				r->disc_addr.traddr,
				NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}

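/*
 * Identify Controller for the discovery subsystem: report synthesized
 * serial/model/firmware strings and the fabrics-mandatory controller fields.
 */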
static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	const char model[] = "Linux";
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memset(id->fr, ' ', sizeof(id->fr));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

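/*
 * Set Features: discovery controllers only implement the Keep Alive Timer
 * and Asynchronous Event Configuration features.
 */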
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

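/*
 * Get Features: mirrors the Set Features handler above; only KATO and the
 * async event configuration can be read back.
 */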
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

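/*
 * Admin command parser for discovery controllers. The controller must have
 * been enabled (CSTS.RDY) first; anything outside the small discovery
 * command set is rejected as an invalid opcode.
 */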
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_err("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

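/* Allocate the well-known discovery subsystem at module initialization. */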
int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

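/* Drop the reference taken by nvmet_init_discovery(). */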
void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}