Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include <linux/fs.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC		16
#define NVMET_MIN_MPOOL_OBJ		16

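/*
 * Re-read the size of the backing file (forcing a synchronous getattr)
 * and refresh the cached namespace capacity.
 */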
int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
	struct kstat stat;
	int ret;

	ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
			  AT_STATX_FORCE_SYNC);
	if (!ret)
		ns->size = stat.size;
	return ret;
}

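/*
 * Tear down a file-backed namespace: drain any pending buffered-I/O
 * work, release the bvec mempool and slab cache, and drop the file
 * reference taken in nvmet_file_ns_enable().
 */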
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

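/*
 * Open the backing file (O_DIRECT unless buffered I/O was requested),
 * cache its size, clamp the exported block size to 4K, and set up the
 * slab cache and mempool used for bvec allocation under memory
 * pressure.
 */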
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		ret = PTR_ERR(ns->file);
		pr_err("failed to open file %s: (%d)\n",
			ns->device_path, ret);
		ns->file = NULL;
		return ret;
	}

	ret = nvmet_file_ns_revalidate(ns);
	if (ret)
		goto err;

	/*
	 * i_blkbits can be greater than the universally accepted upper bound,
	 * so make sure we export a sane namespace lba_shift.
	 */
	ns->blksize_shift = min_t(u8,
			file_inode(ns->file)->i_blkbits, 12);

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
	bv->bv_page = sg_page(sg);
	bv->bv_offset = sg->offset;
	bv->bv_len = sg->length;
}

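/*
 * Build an iov_iter over the request's bio_vecs and hand it to the
 * backing file's read_iter/write_iter. FUA writes are submitted with
 * IOCB_DSYNC so the data reaches stable storage.
 */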
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	return call_iter(iocb, &iter);
}

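/*
 * kiocb completion handler (also called directly for synchronous
 * submissions): free the bvec array, map a short or failed transfer to
 * an NVMe status code, and complete the request.
 */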
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
	u16 status = NVME_SC_SUCCESS;

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	if (unlikely(ret != req->transfer_len))
		status = errno_to_nvme_status(req, ret);
	nvmet_req_complete(req, status);
}

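/*
 * Map the request's scatterlist onto bio_vecs and submit the I/O.
 * When the bvec array came from the mempool and cannot cover the whole
 * transfer, the request is submitted synchronously in chunks of
 * NVMET_MAX_MPOOL_BVEC segments. Returns false only when a
 * non-blocking (IOCB_NOWAIT) attempt should be retried without
 * IOCB_NOWAIT.
 */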
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = req->sg_cnt;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;
	int i;
	struct scatterlist *sg;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->transfer_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->transfer_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which we want
	 * for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * For file systems returning error -EOPNOTSUPP, handle
		 * IOCB_NOWAIT error case separately and retry without
		 * IOCB_NOWAIT.
		 */
		if ((ki_flags & IOCB_NOWAIT))
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret, 0);
	return true;
}

static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

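/*
 * Entry point for Read/Write commands. Pick a bvec array (inline,
 * kmalloc'd, or from the mempool as a last resort), then submit the
 * I/O: buffered namespaces first try a non-blocking submission and
 * fall back to the buffered-I/O workqueue, while direct-I/O namespaces
 * submit in the caller's context.
 */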
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = req->sg_cnt;

	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
		    (req->ns->file->f_mode & FMODE_NOWAIT) &&
		    nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}

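/*
 * Flush: fsync the backing file with datasync semantics and translate
 * the result into an NVMe status code.
 */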
u16 nvmet_file_flush(struct nvmet_req *req)
{
	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

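/*
 * Handle a Dataset Management deallocate: walk the DSM ranges, check
 * each one against the namespace size, and punch holes in the backing
 * file with vfs_fallocate(). Filesystems that do not support hole
 * punching (-EOPNOTSUPP) are silently tolerated.
 */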
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 status = 0;
	int ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (status)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, -ENOSPC);
			break;
		}

		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret && ret != -EOPNOTSUPP) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, ret);
			break;
		}
	}

	nvmet_req_complete(req, status);
}

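/*
 * Dataset Management worker: only the Deallocate attribute is acted on
 * (as a discard); the integral read/write hints are accepted as no-ops.
 */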
static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

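/*
 * Write Zeroes worker: convert the LBA range into a byte offset and
 * length, bounds-check it against the namespace size, and zero it via
 * vfs_fallocate() with FALLOC_FL_ZERO_RANGE while keeping the file
 * size unchanged.
 */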
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

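/*
 * Dispatch an I/O command for a file-backed namespace to its handler.
 * Unsupported opcodes are rejected with Invalid Opcode / Do Not Retry.
 */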
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
				cmd->common.opcode, req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}