Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * NVMe over Fabrics common host code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/miscdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/parser.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include "nvme.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include "fabrics.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) 
/*
 * Fabrics transport implementations known to this core, walked under
 * nvmf_transports_rwsem.  NOTE(review): presumably populated/cleared by
 * the transport (un)registration helpers later in this file — confirm.
 */
static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

/* All nvmf_host entries (one per distinct host NQN), under nvmf_hosts_mutex. */
static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

/* Auto-generated host identity used when no hostnqn option is supplied. */
static struct nvmf_host *nvmf_default_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 	struct nvmf_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 	list_for_each_entry(host, &nvmf_hosts, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 		if (!strcmp(host->nqn, hostnqn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 			return host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) static struct nvmf_host *nvmf_host_add(const char *hostnqn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 	struct nvmf_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 	mutex_lock(&nvmf_hosts_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 	host = __nvmf_host_find(hostnqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 	if (host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 		kref_get(&host->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 	host = kmalloc(sizeof(*host), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 	if (!host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 	kref_init(&host->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	list_add_tail(&host->list, &nvmf_hosts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	mutex_unlock(&nvmf_hosts_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	return host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) static struct nvmf_host *nvmf_host_default(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	struct nvmf_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	host = kmalloc(sizeof(*host), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	if (!host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	kref_init(&host->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	uuid_gen(&host->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	snprintf(host->nqn, NVMF_NQN_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	mutex_lock(&nvmf_hosts_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	list_add_tail(&host->list, &nvmf_hosts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	mutex_unlock(&nvmf_hosts_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	return host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) static void nvmf_host_destroy(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	mutex_lock(&nvmf_hosts_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	list_del(&host->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	mutex_unlock(&nvmf_hosts_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	kfree(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) static void nvmf_host_put(struct nvmf_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	if (host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 		kref_put(&host->ref, nvmf_host_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98)  * nvmf_get_address() -  Get address/port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99)  * @ctrl:	Host NVMe controller instance which we got the address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100)  * @buf:	OUTPUT parameter that will contain the address/port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101)  * @size:	buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 		len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 		len += scnprintf(buf + len, size - len, "%strsvcid=%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 				(len) ? "," : "", ctrl->opts->trsvcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 		len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 				(len) ? "," : "", ctrl->opts->host_traddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	len += scnprintf(buf + len, size - len, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) EXPORT_SYMBOL_GPL(nvmf_get_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122)  * nvmf_reg_read32() -  NVMe Fabrics "Property Get" API function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123)  * @ctrl:	Host NVMe controller instance maintaining the admin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124)  *		queue used to submit the property read command to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125)  *		the allocated NVMe controller resource on the target system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126)  * @off:	Starting offset value of the targeted property
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127)  *		register (see the fabrics section of the NVMe standard).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128)  * @val:	OUTPUT parameter that will contain the value of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129)  *		the property after a successful read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131)  * Used by the host system to retrieve a 32-bit capsule property value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132)  * from an NVMe controller on the target system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134)  * ("Capsule property" is an "PCIe register concept" applied to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135)  * NVMe fabrics space.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138)  *	0: successful read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139)  *	> 0: NVMe error status code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140)  *	< 0: Linux errno error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	struct nvme_command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	union nvme_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	memset(&cmd, 0, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	cmd.prop_get.opcode = nvme_fabrics_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	cmd.prop_get.offset = cpu_to_le32(off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 			NVME_QID_ANY, 0, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 		*val = le64_to_cpu(res.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	if (unlikely(ret != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 		dev_err(ctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 			"Property Get error: %d, offset %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) EXPORT_SYMBOL_GPL(nvmf_reg_read32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168)  * nvmf_reg_read64() -  NVMe Fabrics "Property Get" API function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169)  * @ctrl:	Host NVMe controller instance maintaining the admin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170)  *		queue used to submit the property read command to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171)  *		the allocated controller resource on the target system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172)  * @off:	Starting offset value of the targeted property
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173)  *		register (see the fabrics section of the NVMe standard).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  * @val:	OUTPUT parameter that will contain the value of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175)  *		the property after a successful read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177)  * Used by the host system to retrieve a 64-bit capsule property value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178)  * from an NVMe controller on the target system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180)  * ("Capsule property" is an "PCIe register concept" applied to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181)  * NVMe fabrics space.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184)  *	0: successful read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185)  *	> 0: NVMe error status code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186)  *	< 0: Linux errno error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	struct nvme_command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	union nvme_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	memset(&cmd, 0, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	cmd.prop_get.opcode = nvme_fabrics_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	cmd.prop_get.attrib = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	cmd.prop_get.offset = cpu_to_le32(off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 			NVME_QID_ANY, 0, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 		*val = le64_to_cpu(res.u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	if (unlikely(ret != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 		dev_err(ctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 			"Property Get error: %d, offset %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) EXPORT_SYMBOL_GPL(nvmf_reg_read64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214)  * nvmf_reg_write32() -  NVMe Fabrics "Property Write" API function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215)  * @ctrl:	Host NVMe controller instance maintaining the admin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216)  *		queue used to submit the property read command to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217)  *		the allocated NVMe controller resource on the target system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218)  * @off:	Starting offset value of the targeted property
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219)  *		register (see the fabrics section of the NVMe standard).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220)  * @val:	Input parameter that contains the value to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221)  *		written to the property.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223)  * Used by the NVMe host system to write a 32-bit capsule property value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224)  * to an NVMe controller on the target system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226)  * ("Capsule property" is an "PCIe register concept" applied to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227)  * NVMe fabrics space.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230)  *	0: successful write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231)  *	> 0: NVMe error status code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232)  *	< 0: Linux errno error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	struct nvme_command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	memset(&cmd, 0, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	cmd.prop_set.opcode = nvme_fabrics_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	cmd.prop_set.attrib = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	cmd.prop_set.offset = cpu_to_le32(off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	cmd.prop_set.value = cpu_to_le64(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 			NVME_QID_ANY, 0, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 		dev_err(ctrl->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 			"Property Set error: %d, offset %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) EXPORT_SYMBOL_GPL(nvmf_reg_write32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 
/**
 * nvmf_log_connect_error() - Error-parsing-diagnostic print
 * out function for connect() errors.
 *
 * @ctrl: the specific /dev/nvmeX device that had the error.
 *
 * @errval: Error code to be decoded in a more human-friendly
 *	    printout.
 *
 * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
 *	    Per the fabrics Connect error semantics used below, the upper
 *	    16 bits select data vs. SQE and the lower 16 bits carry the
 *	    byte offset of the offending field.
 *
 * @cmd: This is the SQE portion of a submission capsule.
 *
 * @data: This is the "Data" portion of a submission capsule.
 */
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	/* Strip the Do-Not-Retry bit so we compare the bare status code. */
	int err_sctype = errval & (~NVME_SC_DNR);

	switch (err_sctype) {

	case (NVME_SC_CONNECT_INVALID_PARAM):
		/* High half set: the bad field is in the data capsule. */
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			/* Low half is the byte offset into nvmf_connect_data. */
			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				/*
				 * NOTE(review): cntlid is little-endian wire
				 * format printed raw with %d — value is only
				 * correct on LE hosts; confirm intent.
				 */
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, data->cntlid);
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
				       inv_data, offset & 0xffff);
				break;
			}
		} else {
			/* High half clear: the bad field is in the SQE. */
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
				       "%s, qid %d\n",
					inv_sqe, cmd->connect.qid);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;

	case NVME_SC_CONNECT_INVALID_HOST:
		/* Target rejected this host's NQN for the subsystem. */
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;

	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;

	case NVME_SC_CONNECT_FORMAT:
		/* Record-format (recfmt) mismatch between host and target. */
		dev_err(ctrl->device,
			"Connect incompatible format: %d",
			cmd->connect.recfmt);
		break;

	case NVME_SC_HOST_PATH_ERROR:
		dev_err(ctrl->device,
			"Connect command failed: host path error\n");
		break;

	default:
		/* Anything else: just report the raw status without DNR. */
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	} /* switch (err_sctype) */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 
/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
 *				API function.
 * @ctrl:	Host nvme controller instance used to request
 *              a new NVMe controller allocation on the target
 *              system and  establish an NVMe Admin connection to
 *              that controller.
 *
 * This function enables an NVMe host device to request a new allocation of
 * an NVMe controller resource on a target system as well establish a
 * fabrics-protocol connection of the NVMe Admin queue between the
 * host system device and the allocated NVMe controller on the
 * target system via a NVMe Fabrics "Connect" command.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 *
 */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd;
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	/* qid 0 is the admin queue by definition. */
	cmd.connect.qid = 0;
	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);

	/*
	 * The Connect kato field is expressed in milliseconds, while
	 * ctrl->kato holds seconds — hence the * 1000.  A grace period is
	 * added so the controller doesn't enforce the timeout too eagerly;
	 * kato == 0 disables keep-alive entirely.
	 */
	cmd.connect.kato = ctrl->kato ?
		cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000) : 0;

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Fill the Connect data capsule with this host's identity. */
	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	/* 0xffff asks the target to allocate a dynamic controller ID. */
	data->cntlid = cpu_to_le16(0xffff);
	/*
	 * NOTE(review): strncpy with the full buffer size leaves the field
	 * unterminated if the source fills it exactly — presumably the NQNs
	 * were length-validated during option parsing; confirm.
	 */
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	/*
	 * Use a reserved tag and don't wait for one: the admin connect must
	 * make progress even when the regular tag space is exhausted.
	 */
	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
			data, sizeof(*data), 0, NVME_QID_ANY, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	/* The target returns the allocated controller ID in the result. */
	ctrl->cntlid = le16_to_cpu(res.u16);

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422)  * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423)  *			     API function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424)  * @ctrl:	Host nvme controller instance used to establish an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425)  *		NVMe I/O queue connection to the already allocated NVMe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426)  *		controller on the target system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427)  * @qid:	NVMe I/O queue number for the new I/O connection between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428)  *		host and target (note qid == 0 is illegal as this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429)  *		the Admin queue, per NVMe standard).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430)  * @poll:	Whether or not to poll for the completion of the connect cmd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432)  * This function issues a fabrics-protocol connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433)  * of a NVMe I/O queue (via NVMe Fabrics "Connect" command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434)  * between the host system device and the allocated NVMe controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435)  * on the target system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438)  *	0: success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439)  *	> 0: NVMe error status code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440)  *	< 0: Linux errno error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	struct nvme_command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	struct nvmf_connect_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	union nvme_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	memset(&cmd, 0, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	cmd.connect.opcode = nvme_fabrics_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	cmd.connect.fctype = nvme_fabrics_type_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	cmd.connect.qid = cpu_to_le16(qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	if (ctrl->opts->disable_sqflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	data = kzalloc(sizeof(*data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	uuid_copy(&data->hostid, &ctrl->opts->host->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	data->cntlid = cpu_to_le16(ctrl->cntlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 			data, sizeof(*data), 0, qid, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 				       &cmd, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	if (ctrl->opts->max_reconnects == -1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490)  * nvmf_register_transport() - NVMe Fabrics Library registration function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491)  * @ops:	Transport ops instance to be registered to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492)  *		common fabrics library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494)  * API function that registers the type of specific transport fabric
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495)  * being implemented to the common NVMe fabrics library. Part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496)  * the overall init sequence of starting up a fabrics driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) int nvmf_register_transport(struct nvmf_transport_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	if (!ops->create_ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	down_write(&nvmf_transports_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	list_add_tail(&ops->entry, &nvmf_transports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	up_write(&nvmf_transports_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) EXPORT_SYMBOL_GPL(nvmf_register_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512)  * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513)  * @ops:	Transport ops instance to be unregistered from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514)  *		common fabrics library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516)  * Fabrics API function that unregisters the type of specific transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517)  * fabric being implemented from the common NVMe fabrics library.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518)  * Part of the overall exit sequence of unloading the implemented driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	down_write(&nvmf_transports_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	list_del(&ops->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	up_write(&nvmf_transports_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) static struct nvmf_transport_ops *nvmf_lookup_transport(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		struct nvmf_ctrl_options *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	struct nvmf_transport_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	lockdep_assert_held(&nvmf_transports_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	list_for_each_entry(ops, &nvmf_transports, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		if (strcmp(ops->name, opts->transport) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 			return ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544)  * For something we're not in a state to send to the device the default action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545)  * is to busy it and retry it after the controller state is recovered.  However,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546)  * if the controller is deleting or if anything is marked for failfast or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547)  * nvme multipath it is immediately failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549)  * Note: commands used to initialize the controller will be marked for failfast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550)  * Note: nvme cli/ioctl commands are marked for failfast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	    ctrl->state != NVME_CTRL_DEAD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	blk_mq_start_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	nvme_complete_rq(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		bool queue_live)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	struct nvme_request *req = nvme_req(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	 * currently we have a problem sending passthru commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	 * on the admin_q if the controller is not LIVE because we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	 * make sure that they are going out after the admin connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	 * controller enable and/or other commands in the initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	 * sequence. until the controller will be LIVE, fail with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	 * BLK_STS_RESOURCE so that they will be rescheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	 * Only allow commands on a live queue, except for the connect command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	 * which is require to set the queue live in the appropinquate states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	switch (ctrl->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	case NVME_CTRL_CONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	case NVME_CTRL_DEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	return queue_live;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) EXPORT_SYMBOL_GPL(__nvmf_check_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) static const match_table_t opt_tokens = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	{ NVMF_OPT_TRADDR,		"traddr=%s"		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	{ NVMF_OPT_TRSVCID,		"trsvcid=%s"		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	{ NVMF_OPT_NQN,			"nqn=%s"		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	{ NVMF_OPT_QUEUE_SIZE,		"queue_size=%d"		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	{ NVMF_OPT_NR_IO_QUEUES,	"nr_io_queues=%d"	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	{ NVMF_OPT_RECONNECT_DELAY,	"reconnect_delay=%d"	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	{ NVMF_OPT_CTRL_LOSS_TMO,	"ctrl_loss_tmo=%d"	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d"	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s"		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s"	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	{ NVMF_OPT_HOST_ID,		"hostid=%s"		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	{ NVMF_OPT_DUP_CONNECT,		"duplicate_connect"	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow"	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d"	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	{ NVMF_OPT_TOS,			"tos=%d"		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	{ NVMF_OPT_ERR,			NULL			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		const char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	substring_t args[MAX_OPT_ARGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	char *options, *o, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	int token, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	size_t nqnlen  = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	uuid_t hostid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	/* Set defaults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	opts->nr_io_queues = num_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	opts->kato = NVME_DEFAULT_KATO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	opts->duplicate_connect = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	opts->hdr_digest = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	opts->data_digest = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	opts->tos = -1; /* < 0 == use transport default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	options = o = kstrdup(buf, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	if (!options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	uuid_gen(&hostid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	while ((p = strsep(&o, ",\n")) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		if (!*p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		token = match_token(p, opt_tokens, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		opts->mask |= token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		switch (token) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		case NVMF_OPT_TRANSPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 			p = match_strdup(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 			if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 			kfree(opts->transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 			opts->transport = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		case NVMF_OPT_NQN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 			p = match_strdup(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 			if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 			kfree(opts->subsysnqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 			opts->subsysnqn = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 			nqnlen = strlen(opts->subsysnqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 			if (nqnlen >= NVMF_NQN_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 				pr_err("%s needs to be < %d bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 					opts->subsysnqn, NVMF_NQN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			opts->discovery_nqn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 				!(strcmp(opts->subsysnqn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 					 NVME_DISC_SUBSYS_NAME));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		case NVMF_OPT_TRADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 			p = match_strdup(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 			if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 			kfree(opts->traddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 			opts->traddr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		case NVMF_OPT_TRSVCID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 			p = match_strdup(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 			if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 			kfree(opts->trsvcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 			opts->trsvcid = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		case NVMF_OPT_QUEUE_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 			if (match_int(args, &token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 			if (token < NVMF_MIN_QUEUE_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 			    token > NVMF_MAX_QUEUE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 				pr_err("Invalid queue_size %d\n", token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 			opts->queue_size = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		case NVMF_OPT_NR_IO_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			if (match_int(args, &token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			if (token <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 				pr_err("Invalid number of IOQs %d\n", token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 			if (opts->discovery_nqn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 			opts->nr_io_queues = min_t(unsigned int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 					num_online_cpus(), token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		case NVMF_OPT_KATO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			if (match_int(args, &token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			if (token < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 				pr_err("Invalid keep_alive_tmo %d\n", token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 			} else if (token == 0 && !opts->discovery_nqn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 				/* Allowed for debug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 			opts->kato = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		case NVMF_OPT_CTRL_LOSS_TMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			if (match_int(args, &token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 			if (token < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			ctrl_loss_tmo = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		case NVMF_OPT_HOSTNQN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			if (opts->host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 				pr_err("hostnqn already user-assigned: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 				       opts->host->nqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 				ret = -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			p = match_strdup(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			nqnlen = strlen(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			if (nqnlen >= NVMF_NQN_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 				pr_err("%s needs to be < %d bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 					p, NVMF_NQN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 				kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			nvmf_host_put(opts->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			opts->host = nvmf_host_add(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			if (!opts->host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		case NVMF_OPT_RECONNECT_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			if (match_int(args, &token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			if (token <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 				pr_err("Invalid reconnect_delay %d\n", token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			opts->reconnect_delay = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		case NVMF_OPT_HOST_TRADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			p = match_strdup(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 			kfree(opts->host_traddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			opts->host_traddr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		case NVMF_OPT_HOST_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			p = match_strdup(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			ret = uuid_parse(p, &hostid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 				pr_err("Invalid hostid %s\n", p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 				kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		case NVMF_OPT_DUP_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			opts->duplicate_connect = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		case NVMF_OPT_DISABLE_SQFLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			opts->disable_sqflow = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		case NVMF_OPT_HDR_DIGEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			opts->hdr_digest = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		case NVMF_OPT_DATA_DIGEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			opts->data_digest = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		case NVMF_OPT_NR_WRITE_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			if (match_int(args, &token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 			if (token <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 				pr_err("Invalid nr_write_queues %d\n", token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 			opts->nr_write_queues = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		case NVMF_OPT_NR_POLL_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			if (match_int(args, &token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			if (token <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 				pr_err("Invalid nr_poll_queues %d\n", token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 			opts->nr_poll_queues = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		case NVMF_OPT_TOS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			if (match_int(args, &token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			if (token < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 				pr_err("Invalid type of service %d\n", token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			if (token > 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 				pr_warn("Clamping type of service to 255\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 				token = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			opts->tos = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	if (opts->discovery_nqn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		opts->nr_io_queues = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		opts->nr_write_queues = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		opts->nr_poll_queues = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		opts->duplicate_connect = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	if (ctrl_loss_tmo < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		opts->max_reconnects = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 						opts->reconnect_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if (!opts->host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		kref_get(&nvmf_default_host->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		opts->host = nvmf_default_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	uuid_copy(&opts->host->id, &hostid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	kfree(options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		unsigned int required_opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	if ((opts->mask & required_opts) != required_opts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			if ((opt_tokens[i].token & required_opts) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			    !(opt_tokens[i].token & opts->mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 				pr_warn("missing parameter '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 					opt_tokens[i].pattern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		struct nvmf_ctrl_options *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	    strcmp(opts->traddr, ctrl->opts->traddr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	 * Checking the local address is rough. In most cases, none is specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 * and the host port is selected by the stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	 * Assume no match if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	 * -  local address is specified and address is not the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 * -  local address is not specified but remote is, or vice versa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	 *    (admin using specific host_traddr when it matters).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) EXPORT_SYMBOL_GPL(nvmf_ip_options_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		unsigned int allowed_opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (opts->mask & ~allowed_opts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			if ((opt_tokens[i].token & opts->mask) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			    (opt_tokens[i].token & ~allowed_opts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 				pr_warn("invalid parameter '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 					opt_tokens[i].pattern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) void nvmf_free_options(struct nvmf_ctrl_options *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	nvmf_host_put(opts->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	kfree(opts->transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	kfree(opts->traddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	kfree(opts->trsvcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	kfree(opts->subsysnqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	kfree(opts->host_traddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	kfree(opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) EXPORT_SYMBOL_GPL(nvmf_free_options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) #define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) #define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 				 NVMF_OPT_DISABLE_SQFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) static struct nvme_ctrl *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) nvmf_create_ctrl(struct device *dev, const char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct nvmf_ctrl_options *opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	struct nvmf_transport_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	struct nvme_ctrl *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	if (!opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	ret = nvmf_parse_options(opts, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		goto out_free_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	request_module("nvme-%s", opts->transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 * Check the generic options first as we need a valid transport for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * the lookup below.  Then clear the generic flags so that transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 * drivers don't have to care about them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		goto out_free_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	opts->mask &= ~NVMF_REQUIRED_OPTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	down_read(&nvmf_transports_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	ops = nvmf_lookup_transport(opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		pr_info("no handler found for transport %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			opts->transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if (!try_module_get(ops->module)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	up_read(&nvmf_transports_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	ret = nvmf_check_required_opts(opts, ops->required_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		goto out_module_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 				ops->allowed_opts | ops->required_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		goto out_module_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	ctrl = ops->create_ctrl(dev, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	if (IS_ERR(ctrl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		ret = PTR_ERR(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		goto out_module_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	module_put(ops->module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	return ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) out_module_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	module_put(ops->module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	goto out_free_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	up_read(&nvmf_transports_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) out_free_opts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	nvmf_free_options(opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static struct class *nvmf_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static struct device *nvmf_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static DEFINE_MUTEX(nvmf_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		size_t count, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	struct seq_file *seq_file = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	struct nvme_ctrl *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	const char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	if (count > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	buf = memdup_user_nul(ubuf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	if (IS_ERR(buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		return PTR_ERR(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	mutex_lock(&nvmf_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	if (seq_file->private) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	ctrl = nvmf_create_ctrl(nvmf_device, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (IS_ERR(ctrl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		ret = PTR_ERR(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	seq_file->private = ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	mutex_unlock(&nvmf_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static int nvmf_dev_show(struct seq_file *seq_file, void *private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	struct nvme_ctrl *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	mutex_lock(&nvmf_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	ctrl = seq_file->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	if (!ctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			ctrl->instance, ctrl->cntlid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	mutex_unlock(&nvmf_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static int nvmf_dev_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	 * The miscdevice code initializes file->private_data, but doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	 * make use of it later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	file->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	return single_open(file, nvmf_dev_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static int nvmf_dev_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	struct seq_file *seq_file = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	struct nvme_ctrl *ctrl = seq_file->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		nvme_put_ctrl(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	return single_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) static const struct file_operations nvmf_dev_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	.write		= nvmf_dev_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	.read		= seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	.open		= nvmf_dev_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	.release	= nvmf_dev_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static struct miscdevice nvmf_misc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	.minor		= MISC_DYNAMIC_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	.name           = "nvme-fabrics",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	.fops		= &nvmf_dev_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static int __init nvmf_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	nvmf_default_host = nvmf_host_default();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	if (!nvmf_default_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (IS_ERR(nvmf_class)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		pr_err("couldn't register class nvme-fabrics\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		ret = PTR_ERR(nvmf_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		goto out_free_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	nvmf_device =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (IS_ERR(nvmf_device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		pr_err("couldn't create nvme-fabris device!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		ret = PTR_ERR(nvmf_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		goto out_destroy_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	ret = misc_register(&nvmf_misc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		pr_err("couldn't register misc device: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		goto out_destroy_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) out_destroy_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	device_destroy(nvmf_class, MKDEV(0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) out_destroy_class:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	class_destroy(nvmf_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) out_free_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	nvmf_host_put(nvmf_default_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static void __exit nvmf_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	misc_deregister(&nvmf_misc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	device_destroy(nvmf_class, MKDEV(0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	class_destroy(nvmf_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	nvmf_host_put(nvmf_default_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) module_init(nvmf_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) module_exit(nvmf_exit);