Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

fs/nfs/flexfilelayout/flexfilelayout.c (blame: all lines from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
#define FF_LAYOUTRETURN_MAXERR 20

static unsigned short io_maxretrans;

static const struct pnfs_commit_ops ff_layout_commit_ops;
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
			      const struct nfs42_layoutstat_devinfo *devinfo,
			      struct nfs4_ff_layout_mirror *mirror);

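/*
 * Allocate the flexfile-specific layout header.  The pnfs core only
 * sees the embedded generic_hdr; FF_LAYOUT_FROM_HDR() converts back to
 * the containing nfs4_flexfile_layout whenever the header is handed
 * back to this driver.
 */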
static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		pnfs_init_ds_commit_info(&ffl->commit_info);
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		ffl->commit_info.ops = &ff_layout_commit_ops;
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree_rcu(ffl, generic_hdr.plh_rcu);
}

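/*
 * XDR decode helpers for fields of the flexfile layout body: each one
 * consumes a fixed- or length-prefixed field from the xdr_stream and
 * returns -ENOBUFS if the stream runs short of data.
 */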
static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > NFS_MAXFHSIZE) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

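/*
 * Mirrors match only if their filehandle lists have the same length
 * and every fh in m1 occurs somewhere in m2; with equal counts this
 * amounts to set equality (barring duplicate entries).
 */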
static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					&m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

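/*
 * Deduplicate mirrors across layout segments: if an equivalent mirror
 * (same deviceid and filehandles) is already on the layout's list and
 * its refcount can still be raised, reuse it; otherwise publish the
 * new one.  inode->i_lock serialises against ff_layout_remove_mirror().
 */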
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (refcount_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		refcount_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

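/*
 * Teardown once the last reference is dropped.  The creds were
 * published with rcu_assign_pointer() when the layout segment was
 * decoded, but no other reference to the mirror can exist at this
 * point, so plain rcu_access_pointer() reads suffice before put_cred().
 */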
static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	const struct cred	*cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	put_cred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	put_cred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	u32 i;

	for (i = 0; i < fls->mirror_array_cnt; i++)
		ff_layout_put_mirror(fls->mirror_array[i]);
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

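/*
 * Ordering and merge predicates handed to
 * pnfs_generic_layout_insert_lseg() via ff_layout_add_lseg() below.
 */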
static bool
ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
		struct pnfs_layout_segment *l2)
{
	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
	u32 i;

	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
		return false;
	for (i = 0; i < fl1->mirror_array_cnt; i++) {
		if (fl1->mirror_array[i] != fl2->mirror_array[i])
			return false;
	}
	return true;
}

static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

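/*
 * Fold 'old' into 'new' when both segments use the same iomode, share
 * the same mirror array and their byte ranges intersect; 'new' then
 * covers the union of the two ranges and inherits the ROC flag.
 */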
static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;
	if (!ff_lseg_match_mirrors(new, old))
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

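/*
 * O(n^2) selection sort, ordering the mirror array by decreasing
 * efficiency so that DS selection for reads tries the most efficient
 * mirror first.
 */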
static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

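/*
 * Decode the ff_layout4 body of a LAYOUTGET reply (RFC 8435): stripe
 * unit and mirror count, then per mirror a DS count, deviceid,
 * efficiency, stateid, filehandle array and stringified uid/gid,
 * followed by optional flags and a layoutstats report interval.  The
 * uid/gid become a kernel cred used for I/O to the data servers.
 */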
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
			gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct cred *kcred;
		const struct cred __rcu *cred;
		kuid_t uid;
		kgid_t gid;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
		if (rc)
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kcalloc(fh_count, sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		gid = make_kgid(&init_user_ns, id);

		if (gfp_flags & __GFP_FS)
			kcred = prepare_kernel_cred(NULL);
		else {
			unsigned int nofs_flags = memalloc_nofs_save();
			kcred = prepare_kernel_cred(NULL);
			memalloc_nofs_restore(nofs_flags);
		}
		rc = -ENOMEM;
		if (!kcred)
			goto out_err_free;
		kcred->fsuid = uid;
		kcred->fsgid = gid;
		cred = RCU_INITIALIZER(kcred);

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, uid),
			from_kgid(&init_user_ns, gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

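/*
 * Busy-timer bookkeeping for layoutstats: n_ops counts in-flight ops,
 * and the time between going busy and the matching end call is folded
 * into the mirror's total_busy_time by the completion path below.
 */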
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

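/*
 * Returns true when it is time to transmit a LAYOUTSTATS report: a
 * server-supplied per-mirror interval takes precedence over the global
 * layoutstats_timer setting, which in turn overrides the built-in
 * FF_LAYOUTSTATS_REPORT_INTERVAL default.
 */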
static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->start_time)
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
			report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}

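/*
 * Per-direction wrappers: update the requested/completed counters
 * under mirror->lock, flag the mirror as having fresh stats, and kick
 * off a layoutstats report if the interval has elapsed.
 */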
static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

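/*
 * Flip a mirror's data server between available and unavailable in
 * the deviceid cache; unavailable servers are skipped by the read DS
 * selection below.
 */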
static void
ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (devid)
		nfs4_mark_deviceid_unavailable(devid);
}

static void
ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (devid)
		nfs4_mark_deviceid_available(devid);
}

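/*
 * Scan the (efficiency-sorted) mirror array from start_idx for a
 * usable data server.  Only on the last mirror, and only when the
 * device-availability check is being skipped, is the DS connection
 * attempt allowed to fail hard (fail_return).
 */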
static struct nfs4_pnfs_ds *
ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
			     u32 start_idx, u32 *best_idx,
			     bool check_device)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	bool fail_return = false;
	u32 idx;

	/* mirrors are initially sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		if (idx+1 == fls->mirror_array_cnt)
			fail_return = !check_device;

		mirror = FF_LAYOUT_COMP(lseg, idx);
		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return);
		if (!ds)
			continue;

		if (check_device &&
		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
			continue;

		*best_idx = idx;
		return ds;
	}

	return NULL;
}

static struct nfs4_pnfs_ds *
ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
				 u32 start_idx, u32 *best_idx)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
				   u32 start_idx, u32 *best_idx)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  u32 start_idx, u32 *best_idx)
{
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
	if (ds)
		return ds;
	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) static struct nfs4_pnfs_ds *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			  u32 *best_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	struct nfs4_pnfs_ds *ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 					       best_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (ds || !pgio->pg_mirror_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		return ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) }
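
/*
 * Editor's note: ff_layout_get_ds_for_read() searches from the current
 * mirror and, if that fails and the search did not already begin at
 * index 0, wraps around once.  A self-contained sketch (scan_from() and
 * the usable[] array are invented for illustration):
 */
#include <stdio.h>

/* Return the first usable index at or after start, or -1. */
static int scan_from(const int *usable, int cnt, int start)
{
	for (int i = start; i < cnt; i++)
		if (usable[i])
			return i;
	return -1;
}

static int pick_with_wraparound(const int *usable, int cnt, int cur)
{
	int idx = scan_from(usable, cnt, cur);

	if (idx >= 0 || cur == 0)
		return idx;
	return scan_from(usable, cnt, 0);	/* one retry from the front */
}

int main(void)
{
	int usable[] = { 1, 0, 0 };

	printf("%d\n", pick_with_wraparound(usable, 3, 1));	/* prints 0 */
	return 0;
}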
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		      struct nfs_page *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		      bool strict_iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	pnfs_put_lseg(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 					   nfs_req_openctx(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 					   req_offset(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 					   req->wb_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 					   IOMODE_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 					   strict_iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 					   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	if (IS_ERR(pgio->pg_lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		pgio->pg_lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 			  struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	pnfs_generic_pg_check_layout(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	pnfs_generic_pg_check_range(pgio, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	struct nfs_pgio_mirror *pgm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	struct nfs4_ff_layout_mirror *mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	struct nfs4_pnfs_ds *ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	u32 ds_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	ff_layout_pg_check_layout(pgio, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	/* Use full layout for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (!pgio->pg_lseg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		ff_layout_pg_get_read(pgio, req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		if (!pgio->pg_lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			goto out_nolseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		ff_layout_pg_get_read(pgio, req, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		if (!pgio->pg_lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			goto out_nolseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (!ds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 			goto out_mds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		pnfs_generic_pg_cleanup(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		/* Sleep for 1 second before retrying */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	pgm = &pgio->pg_mirrors[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	pgio->pg_mirror_idx = ds_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (NFS_SERVER(pgio->pg_inode)->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		pgio->pg_maxretrans = io_maxretrans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) out_nolseg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (pgio->pg_error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) out_mds:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			0, NFS4_MAX_UINT64, IOMODE_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			NFS_I(pgio->pg_inode)->layout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	pgio->pg_maxretrans = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	nfs_pageio_reset_read_mds(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) }
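
/*
 * Editor's note: when the layout forbids MDS fallback, the pg_init path
 * above loops — drop the layout state, sleep one second, try again.  A
 * hedged userspace model of that retry shape (try_get_ds() is a fake):
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool try_get_ds(int attempt)
{
	return attempt >= 2;	/* pretend a DS comes back on the 3rd try */
}

static void init_read_model(bool may_fall_back_to_mds)
{
	for (int attempt = 0; ; attempt++) {
		if (try_get_ds(attempt)) {
			puts("got a DS, proceed with the pNFS read");
			return;
		}
		if (may_fall_back_to_mds) {
			puts("no DS, redirect the read to the MDS");
			return;
		}
		sleep(1);	/* matches the ssleep(1) before the retry */
	}
}

int main(void)
{
	init_read_model(false);
	return 0;
}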
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 			struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	struct nfs4_ff_layout_mirror *mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	struct nfs_pgio_mirror *pgm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	struct nfs4_pnfs_ds *ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	ff_layout_pg_check_layout(pgio, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	if (!pgio->pg_lseg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 						   nfs_req_openctx(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 						   req_offset(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 						   req->wb_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 						   IOMODE_RW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 						   false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 						   GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		if (IS_ERR(pgio->pg_lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			pgio->pg_lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	/* If no lseg, fall back to write through mds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (pgio->pg_lseg == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		goto out_mds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	/* Use a direct mapping of ds_idx to pgio mirror_idx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		goto out_eagain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	for (i = 0; i < pgio->pg_mirror_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		if (!ds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 				goto out_mds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			pnfs_generic_pg_cleanup(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			/* Sleep for 1 second before retrying */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		pgm = &pgio->pg_mirrors[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	if (NFS_SERVER(pgio->pg_inode)->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		pgio->pg_maxretrans = io_maxretrans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) out_eagain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	pnfs_generic_pg_cleanup(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	pgio->pg_error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) out_mds:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			0, NFS4_MAX_UINT64, IOMODE_RW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			NFS_I(pgio->pg_inode)->layout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 			pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	pgio->pg_maxretrans = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	nfs_pageio_reset_write_mds(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	pgio->pg_error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) }
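
/*
 * Editor's note: writes fan out to every mirror, so the descriptor's
 * mirror slots must map 1:1 onto the layout's mirrors; the -EAGAIN exit
 * above rejects a stale descriptor.  Sketch (names are hypothetical):
 */
#include <stdio.h>

static int check_mirror_mapping(unsigned int pgio_mirrors,
				unsigned int layout_mirrors)
{
	if (pgio_mirrors != layout_mirrors)
		return -1;	/* the out_eagain case: retry with a fresh descriptor */
	return 0;		/* direct ds_idx -> mirror_idx mapping is safe */
}

int main(void)
{
	printf("%d\n", check_mirror_mapping(2, 3));	/* prints -1 */
	return 0;
}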
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 				    struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (!pgio->pg_lseg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 						   nfs_req_openctx(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 						   req_offset(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 						   req->wb_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 						   IOMODE_RW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 						   false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 						   GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		if (IS_ERR(pgio->pg_lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			pgio->pg_lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	if (pgio->pg_lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			0, NFS4_MAX_UINT64, IOMODE_RW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			NFS_I(pgio->pg_inode)->layout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	/* no lseg means that pnfs is not in use, so no mirroring here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	nfs_pageio_reset_write_mds(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	u32 old = desc->pg_mirror_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	desc->pg_mirror_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	return old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) static struct nfs_pgio_mirror *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	return &desc->pg_mirrors[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	.pg_init = ff_layout_pg_init_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	.pg_test = pnfs_generic_pg_test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	.pg_doio = pnfs_generic_pg_readpages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	.pg_cleanup = pnfs_generic_pg_cleanup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	.pg_init = ff_layout_pg_init_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	.pg_test = pnfs_generic_pg_test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	.pg_doio = pnfs_generic_pg_writepages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	.pg_cleanup = pnfs_generic_pg_cleanup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	.pg_get_mirror = ff_layout_pg_get_mirror_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	.pg_set_mirror = ff_layout_pg_set_mirror_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) };
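
/*
 * Editor's note: both tables above are instances of the kernel's
 * function-pointer "ops" idiom — the generic pageio engine calls
 * through the table and never sees the layout driver.  A toy model
 * (struct engine and friends are invented for illustration):
 */
#include <stdio.h>

struct engine;
struct engine_ops {
	void (*init)(struct engine *e);
	int  (*test)(struct engine *e);
	void (*doio)(struct engine *e);
};
struct engine { const struct engine_ops *ops; };

static void my_init(struct engine *e) { (void)e; puts("init"); }
static int  my_test(struct engine *e) { (void)e; return 1; }
static void my_doio(struct engine *e) { (void)e; puts("doio"); }

static const struct engine_ops my_ops = {
	.init = my_init,
	.test = my_test,
	.doio = my_doio,
};

int main(void)
{
	struct engine e = { .ops = &my_ops };

	e.ops->init(&e);
	if (e.ops->test(&e))
		e.ops->doio(&e);
	return 0;
}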
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	struct rpc_task *task = &hdr->task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	pnfs_layoutcommit_inode(hdr->inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	if (retry_pnfs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		dprintk("%s Reset task %5u for i/o through pNFS "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			hdr->task.tk_pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			hdr->inode->i_sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			(unsigned long long)NFS_FILEID(hdr->inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			(unsigned long long)hdr->args.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		hdr->completion_ops->reschedule_io(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		dprintk("%s Reset task %5u for i/o through MDS "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			hdr->task.tk_pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			hdr->inode->i_sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 			(unsigned long long)NFS_FILEID(hdr->inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			(unsigned long long)hdr->args.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		trace_pnfs_mds_fallback_write_done(hdr->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 				hdr->args.offset, hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 				IOMODE_RW, NFS_I(hdr->inode)->layout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 				hdr->lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
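
/*
 * Editor's note: the test_and_set_bit(NFS_IOHDR_REDO, ...) guard above
 * makes the MDS resend a one-shot per header.  A userspace model of
 * that latch using C11 atomics (not the kernel bitops API):
 */
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_flag redo = ATOMIC_FLAG_INIT;

	for (int i = 0; i < 3; i++) {
		/* atomic_flag_test_and_set() returns the previous value,
		 * so only the first caller takes the resend branch */
		if (!atomic_flag_test_and_set(&redo))
			puts("resend through MDS (runs exactly once)");
	}
	return 0;
}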
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	u32 idx = hdr->pgio_mirror_idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	u32 new_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		ff_layout_send_layouterror(hdr->lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	pnfs_read_resend_pnfs(hdr, new_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	struct rpc_task *task = &hdr->task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	pnfs_layoutcommit_inode(hdr->inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		dprintk("%s Reset task %5u for i/o through MDS "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			hdr->task.tk_pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			hdr->inode->i_sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			(unsigned long long)NFS_FILEID(hdr->inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			(unsigned long long)hdr->args.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		trace_pnfs_mds_fallback_read_done(hdr->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 				hdr->args.offset, hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 				IOMODE_READ, NFS_I(hdr->inode)->layout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 				hdr->lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static int ff_layout_async_handle_error_v4(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 					   struct nfs4_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 					   struct nfs_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 					   struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 					   u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct inode *inode = lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	switch (task->tk_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	case -NFS4ERR_BADSESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	case -NFS4ERR_BADSLOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	case -NFS4ERR_BAD_HIGH_SLOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	case -NFS4ERR_DEADSESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	case -NFS4ERR_SEQ_FALSE_RETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	case -NFS4ERR_SEQ_MISORDERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		dprintk("%s ERROR %d, Reset session. Exchangeid "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			"flags 0x%x\n", __func__, task->tk_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			clp->cl_exchange_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	case -NFS4ERR_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	case -NFS4ERR_GRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	case -NFS4ERR_RETRY_UNCACHED_REP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	/* Errors that invalidate the layout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	case -NFS4ERR_PNFS_NO_LAYOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	case -ESTALE:           /* mapped NFS4ERR_STALE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	case -NFS4ERR_FHEXPIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	case -NFS4ERR_WRONG_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		dprintk("%s Invalid layout error %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		 * Destroy layout so new i/o will get a new layout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		 * Layout will not be destroyed until all current lseg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		 * references are put. Mark layout as invalid to resend failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		 * i/o and all i/o waiting on the slot table to the MDS until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		 * layout is destroyed and a new valid layout is obtained.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		pnfs_destroy_layout(NFS_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		rpc_wake_up(&tbl->slot_tbl_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	/* RPC connection errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	case -ECONNREFUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	case -EHOSTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	case -EHOSTUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	case -ENETUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	case -EIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		dprintk("%s DS connection error %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 				&devid->deviceid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		rpc_wake_up(&tbl->slot_tbl_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		if (ff_layout_avoid_mds_available_ds(lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			return -NFS4ERR_RESET_TO_PNFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		dprintk("%s Retry through MDS. Error %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		return -NFS4ERR_RESET_TO_MDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
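
/*
 * Editor's note: the v4 handler above is a classifier — session errors
 * recover and retry, delay errors back off, layout-invalidating errors
 * and connection errors fall through to a reset.  A compressed sketch
 * with invented error labels (the real cases are the NFS4ERR_* values):
 */
#include <stdio.h>

enum reset_action { RETRY_SAME_DS, RESET_TO_PNFS, RESET_TO_MDS };

enum err_class { ERR_SESSION, ERR_DELAY, ERR_BAD_LAYOUT, ERR_CONNECTION };

static enum reset_action classify(enum err_class err, int mds_forbidden)
{
	switch (err) {
	case ERR_SESSION:	/* schedule session recovery, then retry */
	case ERR_DELAY:		/* rpc_delay(), then retry */
		return RETRY_SAME_DS;
	case ERR_BAD_LAYOUT:	/* destroy the layout, go through the MDS */
		return RESET_TO_MDS;
	case ERR_CONNECTION:	/* drop the deviceid, then fall through */
	default:
		return mds_forbidden ? RESET_TO_PNFS : RESET_TO_MDS;
	}
}

int main(void)
{
	printf("%d\n", classify(ERR_BAD_LAYOUT, 0));	/* prints 2 */
	return 0;
}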
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static int ff_layout_async_handle_error_v3(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 					   struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 					   u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	switch (task->tk_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	/* File access problems. Don't mark the device as unavailable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	case -EACCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	case -ESTALE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	case -EISDIR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	case -EBADHANDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	case -ELOOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	case -ENOSPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	case -EJUKEBOX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		goto out_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		dprintk("%s DS connection error %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 				&devid->deviceid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	/* FIXME: Need to prevent infinite looping here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	return -NFS4ERR_RESET_TO_PNFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) out_retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	rpc_restart_call_prepare(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static int ff_layout_async_handle_error(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 					struct nfs4_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 					struct nfs_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 					struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 					u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	int vers = clp->cl_nfs_mod->rpc_vers->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	if (task->tk_status >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		ff_layout_mark_ds_reachable(lseg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	/* Handle the case of an invalid layout segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	if (!pnfs_is_valid_lseg(lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		return -NFS4ERR_RESET_TO_PNFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	switch (vers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		return ff_layout_async_handle_error_v3(task, lseg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		return ff_layout_async_handle_error_v4(task, state, clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 						       lseg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		/* should never happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 					u32 idx, u64 offset, u64 length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 					u32 *op_status, int opnum, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	struct nfs4_ff_layout_mirror *mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	u32 status = *op_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		switch (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		case -EPFNOSUPPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		case -EPROTONOSUPPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		case -EOPNOTSUPP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		case -ECONNREFUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		case -ECONNRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		case -EHOSTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		case -EHOSTUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		case -ENETUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		case -EADDRINUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		case -ENOBUFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		case -EPERM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			*op_status = status = NFS4ERR_NXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		case -EACCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			*op_status = status = NFS4ERR_ACCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	mirror = FF_LAYOUT_COMP(lseg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 				       mirror, offset, length, status, opnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 				       GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	case NFS4ERR_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	case NFS4ERR_GRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	case NFS4ERR_NXIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		ff_layout_mark_ds_unreachable(lseg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		 * Don't return the layout if this is a read and we still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		 * have other mirrors to try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		if (opnum == OP_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 						  lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
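
/*
 * Editor's note: when the op carried no NFS status (*op_status == 0),
 * the tracker above synthesizes one from the local errno so transport
 * failures and protocol failures share one reporting path.  Sketch
 * (the MODEL_* values are placeholders, not the on-the-wire codes):
 */
#include <errno.h>
#include <stdio.h>

#define MODEL_NFS4ERR_NXIO   1
#define MODEL_NFS4ERR_ACCESS 2

static int status_from_errno(int error)
{
	switch (error) {
	case -ETIMEDOUT:
	case -ECONNREFUSED:
	case -EHOSTUNREACH:
		return MODEL_NFS4ERR_NXIO;	/* DS treated as unreachable */
	case -EACCES:
		return MODEL_NFS4ERR_ACCESS;
	default:
		return 0;			/* nothing worth tracking */
	}
}

int main(void)
{
	printf("%d\n", status_from_errno(-ETIMEDOUT));	/* prints 1 */
	return 0;
}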
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /* NFS_PROTO call done callback routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static int ff_layout_read_done_cb(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 				struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	if (task->tk_status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 					    hdr->args.offset, hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 					    &hdr->res.op_status, OP_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 					    task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		trace_ff_layout_read_error(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 					   hdr->ds_clp, hdr->lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 					   hdr->pgio_mirror_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	trace_nfs4_pnfs_read(hdr, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	case -NFS4ERR_RESET_TO_PNFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		return task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	case -NFS4ERR_RESET_TO_MDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		return task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		goto out_eagain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) out_eagain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	rpc_restart_call_prepare(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  * We reference the rpc_cred of the first WRITE that triggers the need for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  * RFC 5661 is not clear about which credential should be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * The flexfiles client should treat a FILE_SYNC reply from a DS as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  * DATA_SYNC; per http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  * we always send a LAYOUTCOMMIT after DS writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ff_layout_set_layoutcommit(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		loff_t end_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	if (!ff_layout_need_layoutcommit(lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	pnfs_set_layoutcommit(inode, lseg, end_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
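
/*
 * Editor's note: pnfs_set_layoutcommit() advances the layout's "last
 * write byte" (the plh_lwb printed above); a write of count bytes at
 * offset ends at offset + count.  Minimal arithmetic model:
 */
#include <stdio.h>

static long long update_lwb(long long lwb, long long offset, long long count)
{
	long long end = offset + count;

	return end > lwb ? end : lwb;	/* lwb only ever grows */
}

int main(void)
{
	long long lwb = 0;

	lwb = update_lwb(lwb, 0, 4096);
	lwb = update_lwb(lwb, 8192, 4096);
	printf("%lld\n", lwb);	/* prints 12288 */
	return 0;
}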
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	nfs4_ff_layout_stat_io_start_read(hdr->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			task->tk_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	nfs4_ff_layout_stat_io_end_read(task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			hdr->res.count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static int ff_layout_read_prepare_common(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 					 struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		rpc_exit(task, -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	ff_layout_read_record_layoutstats_start(task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)  * Call ops for the async read/write cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)  * In the case of dense layouts, the offset needs to be reset to its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)  * original value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	struct nfs_pgio_header *hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	if (ff_layout_read_prepare_common(task, hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	rpc_call_start(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	struct nfs_pgio_header *hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	if (nfs4_setup_sequence(hdr->ds_clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 				&hdr->args.seq_args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 				&hdr->res.seq_res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 				task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	ff_layout_read_prepare_common(task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) static void ff_layout_read_call_done(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	struct nfs_pgio_header *hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	    task->tk_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		nfs4_sequence_done(task, &hdr->res.seq_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	/* Note this may cause RPC to be resent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	hdr->mds_ops->rpc_call_done(task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	struct nfs_pgio_header *hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	ff_layout_read_record_layoutstats_done(task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	rpc_count_iostats_metrics(task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static void ff_layout_read_release(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	struct nfs_pgio_header *hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		ff_layout_resend_pnfs_read(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		ff_layout_reset_read(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	pnfs_generic_rw_release(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
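
/*
 * Editor's note: the release path above replays failed reads according
 * to whichever resend flag the done callback latched.  A sketch with
 * plain booleans standing in for the NFS_IOHDR_RESEND_* header bits:
 */
#include <stdbool.h>
#include <stdio.h>

static void release_model(bool resend_pnfs, bool resend_mds)
{
	if (resend_pnfs)
		puts("resend through the next pNFS mirror");
	else if (resend_mds)
		puts("resend through the MDS");
	else
		puts("I/O complete, nothing to replay");
}

int main(void)
{
	release_model(true, false);
	return 0;
}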
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static int ff_layout_write_done_cb(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 				struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	loff_t end_offs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	if (task->tk_status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 					    hdr->args.offset, hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 					    &hdr->res.op_status, OP_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 					    task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		trace_ff_layout_write_error(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 					   hdr->ds_clp, hdr->lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 					   hdr->pgio_mirror_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	trace_nfs4_pnfs_write(hdr, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	case -NFS4ERR_RESET_TO_PNFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		return task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	case -NFS4ERR_RESET_TO_MDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		return task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	    hdr->res.verf->committed == NFS_DATA_SYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	/* Note: if the write is unstable, don't set end_offs until commit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	/* zero out fattr since we don't care about DS attrs at all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	hdr->fattr.valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (task->tk_status >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		nfs_writeback_update_inode(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static int ff_layout_commit_done_cb(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 				     struct nfs_commit_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	if (task->tk_status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 					    data->args.offset, data->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 					    &data->res.op_status, OP_COMMIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 					    task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		trace_ff_layout_commit_error(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 					   data->lseg, data->ds_commit_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	trace_nfs4_pnfs_commit_ds(data, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	case -NFS4ERR_RESET_TO_PNFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		pnfs_generic_prepare_to_resend_writes(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	case -NFS4ERR_RESET_TO_MDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		pnfs_generic_prepare_to_resend_writes(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		rpc_restart_call_prepare(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 			task->tk_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	nfs4_ff_layout_stat_io_end_write(task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			hdr->args.count, hdr->res.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 			hdr->res.verf->committed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
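
/*
 * Editor's note: the NFS_IOHDR_STAT bit makes the start/done pair above
 * idempotent. rpc_call_prepare may run more than once if the task is
 * restarted, and both rpc_count_stats and rpc_release invoke the done
 * helper (see ff_layout_write_count_stats() and ff_layout_write_release()
 * below), so each side records its layoutstats sample at most once.
 */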
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static int ff_layout_write_prepare_common(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 					  struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		rpc_exit(task, -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	ff_layout_write_record_layoutstats_start(task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	struct nfs_pgio_header *hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	if (ff_layout_write_prepare_common(task, hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	rpc_call_start(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	struct nfs_pgio_header *hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	if (nfs4_setup_sequence(hdr->ds_clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 				&hdr->args.seq_args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 				&hdr->res.seq_res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 				task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	ff_layout_write_prepare_common(task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) static void ff_layout_write_call_done(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	struct nfs_pgio_header *hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	    task->tk_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		nfs4_sequence_done(task, &hdr->res.seq_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	/* Note this may cause RPC to be resent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	hdr->mds_ops->rpc_call_done(task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	struct nfs_pgio_header *hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	ff_layout_write_record_layoutstats_done(task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	rpc_count_iostats_metrics(task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) static void ff_layout_write_release(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	struct nfs_pgio_header *hdr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		ff_layout_send_layouterror(hdr->lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		ff_layout_reset_write(hdr, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		ff_layout_reset_write(hdr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	pnfs_generic_rw_release(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		struct nfs_commit_data *cdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			0, task->tk_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		struct nfs_commit_data *cdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	struct nfs_page *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	__u64 count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	if (task->tk_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		list_for_each_entry(req, &cdata->pages, wb_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 			count += req->wb_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	nfs4_ff_layout_stat_io_end_write(task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 			count, count, NFS_FILE_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static void ff_layout_commit_prepare_common(struct rpc_task *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		struct nfs_commit_data *cdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	ff_layout_commit_record_layoutstats_start(task, cdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	ff_layout_commit_prepare_common(task, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	rpc_call_start(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	struct nfs_commit_data *wdata = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (nfs4_setup_sequence(wdata->ds_clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 				&wdata->args.seq_args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 				&wdata->res.seq_res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 				task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	ff_layout_commit_prepare_common(task, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) static void ff_layout_commit_done(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	pnfs_generic_write_commit_done(task, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	struct nfs_commit_data *cdata = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	ff_layout_commit_record_layoutstats_done(task, cdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	rpc_count_iostats_metrics(task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) static void ff_layout_commit_release(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	struct nfs_commit_data *cdata = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	pnfs_generic_commit_release(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	.rpc_call_prepare = ff_layout_read_prepare_v3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	.rpc_call_done = ff_layout_read_call_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	.rpc_count_stats = ff_layout_read_count_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	.rpc_release = ff_layout_read_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	.rpc_call_prepare = ff_layout_read_prepare_v4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	.rpc_call_done = ff_layout_read_call_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	.rpc_count_stats = ff_layout_read_count_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	.rpc_release = ff_layout_read_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	.rpc_call_prepare = ff_layout_write_prepare_v3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	.rpc_call_done = ff_layout_write_call_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	.rpc_count_stats = ff_layout_write_count_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	.rpc_release = ff_layout_write_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	.rpc_call_prepare = ff_layout_write_prepare_v4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	.rpc_call_done = ff_layout_write_call_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	.rpc_count_stats = ff_layout_write_count_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	.rpc_release = ff_layout_write_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	.rpc_call_done = ff_layout_commit_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	.rpc_count_stats = ff_layout_commit_count_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	.rpc_release = ff_layout_commit_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	.rpc_call_done = ff_layout_commit_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	.rpc_count_stats = ff_layout_commit_count_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	.rpc_release = ff_layout_commit_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) };
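
/*
 * Editor's note: each operation carries two rpc_call_ops tables whose
 * only difference is the prepare callback. An NFSv3 data server can
 * start the call immediately via rpc_call_start(), while an NFSv4.x
 * one must first claim a session slot through nfs4_setup_sequence()
 * before the RPC is transmitted.
 */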
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static enum pnfs_try_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	struct pnfs_layout_segment *lseg = hdr->lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	struct nfs4_pnfs_ds *ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	struct rpc_clnt *ds_clnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	struct nfs4_ff_layout_mirror *mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	const struct cred *ds_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	loff_t offset = hdr->args.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	u32 idx = hdr->pgio_mirror_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	int vers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	struct nfs_fh *fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		__func__, hdr->inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	mirror = FF_LAYOUT_COMP(lseg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	if (!ds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		goto out_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 						   hdr->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	if (IS_ERR(ds_clnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		goto out_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	if (!ds_cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		goto out_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	vers = nfs4_ff_layout_ds_version(mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	hdr->pgio_done_cb = ff_layout_read_done_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	refcount_inc(&ds->ds_clp->cl_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	hdr->ds_clp = ds->ds_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	fh = nfs4_ff_layout_select_ds_fh(mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	if (fh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		hdr->args.fh = fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	 * Note that if we ever decide to split across DSes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	 * then we may need to handle dense-like offsets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	hdr->args.offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	hdr->mds_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	/* Perform an asynchronous read to ds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 				      &ff_layout_read_call_ops_v4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 			  0, RPC_TASK_SOFTCONN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	put_cred(ds_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	return PNFS_ATTEMPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) out_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	if (ff_layout_avoid_mds_available_ds(lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		return PNFS_TRY_AGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 			hdr->args.offset, hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	return PNFS_NOT_ATTEMPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
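
/*
 * Editor's note on the failure path above: PNFS_TRY_AGAIN is returned
 * when ff_layout_avoid_mds_available_ds() indicates another usable data
 * server remains, letting the pageio layer retry through pNFS; otherwise
 * the fallback is traced and the I/O is handed back to the MDS with
 * PNFS_NOT_ATTEMPTED. The write path below follows the same pattern.
 */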
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) /* Perform async writes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) static enum pnfs_try_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	struct pnfs_layout_segment *lseg = hdr->lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	struct nfs4_pnfs_ds *ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	struct rpc_clnt *ds_clnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	struct nfs4_ff_layout_mirror *mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	const struct cred *ds_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	loff_t offset = hdr->args.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	int vers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	struct nfs_fh *fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	u32 idx = hdr->pgio_mirror_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	mirror = FF_LAYOUT_COMP(lseg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	if (!ds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		goto out_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 						   hdr->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	if (IS_ERR(ds_clnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		goto out_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	if (!ds_cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		goto out_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	vers = nfs4_ff_layout_ds_version(mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		vers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	hdr->pgio_done_cb = ff_layout_write_done_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	refcount_inc(&ds->ds_clp->cl_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	hdr->ds_clp = ds->ds_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	hdr->ds_commit_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	fh = nfs4_ff_layout_select_ds_fh(mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	if (fh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		hdr->args.fh = fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	 * Note that if we ever decide to split across DSes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	 * then we may need to handle dense-like offsets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	hdr->args.offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	/* Perform an asynchronous write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 				      &ff_layout_write_call_ops_v4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 			  sync, RPC_TASK_SOFTCONN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	put_cred(ds_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	return PNFS_ATTEMPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) out_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	if (ff_layout_avoid_mds_available_ds(lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		return PNFS_TRY_AGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 			hdr->args.offset, hdr->args.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	return PNFS_NOT_ATTEMPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
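
/*
 * Editor's note: in the flexfiles layout the commit bucket index maps
 * one-to-one onto the mirror index, hence the identity function above.
 */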
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) static struct nfs_fh *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	/* FIXME: Assumes that there is only one NFS version available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	 * for the DS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	return &flseg->mirror_array[i]->fh_versions[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	struct pnfs_layout_segment *lseg = data->lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	struct nfs4_pnfs_ds *ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	struct rpc_clnt *ds_clnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	struct nfs4_ff_layout_mirror *mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	const struct cred *ds_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	u32 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	int vers, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	struct nfs_fh *fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	mirror = FF_LAYOUT_COMP(lseg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	if (!ds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 						   data->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	if (IS_ERR(ds_clnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	if (!ds_cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	vers = nfs4_ff_layout_ds_version(mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		vers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	data->commit_done_cb = ff_layout_commit_done_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	data->cred = ds_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	refcount_inc(&ds->ds_clp->cl_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	data->ds_clp = ds->ds_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	if (fh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		data->args.fh = fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 					       &ff_layout_commit_call_ops_v4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 				   how, RPC_TASK_SOFTCONN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	put_cred(ds_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	pnfs_generic_prepare_to_resend_writes(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	pnfs_generic_commit_release(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
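
/*
 * Editor's note: any setup failure above (invalid or returned lseg,
 * unreachable DS, failed DS client, missing credential) lands in
 * out_err, which re-queues the commit pages for resending and releases
 * the commit data; -EAGAIN tells the caller the commit was never sent.
 */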
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 			   int how, struct nfs_commit_info *cinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 					    ff_layout_initiate_commit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) static struct pnfs_ds_commit_info *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) ff_layout_get_ds_info(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	if (layout == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	struct inode *inode = lseg->pls_layout->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	struct pnfs_commit_array *array, *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	if (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		if (array != new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 			pnfs_free_commit_array(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
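
/*
 * Editor's note: the commit array is allocated optimistically outside
 * inode->i_lock (the GFP_NOIO allocation may sleep) and only published
 * under the lock; if another thread raced and installed an array first,
 * pnfs_add_commit_array() hands back the existing one and the fresh
 * allocation is freed.
 */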
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 						  id_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 				  const struct nfs4_layoutreturn_args *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	__be32 *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	start = xdr_reserve_space(xdr, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	if (unlikely(!start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	*start = cpu_to_be32(ff_args->num_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	/* This assumes we always return _ALL_ layouts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 			    const nfs4_stateid *stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 			    const struct nfs42_layoutstat_devinfo *devinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	__be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	p = xdr_reserve_space(xdr, 8 + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	p = xdr_encode_hyper(p, devinfo->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	p = xdr_encode_hyper(p, devinfo->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	p = xdr_reserve_space(xdr, 4*8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	p = xdr_encode_hyper(p, devinfo->read_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	p = xdr_encode_hyper(p, devinfo->read_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	p = xdr_encode_hyper(p, devinfo->write_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	p = xdr_encode_hyper(p, devinfo->write_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 			    const nfs4_stateid *stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 			    const struct nfs42_layoutstat_devinfo *devinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 			devinfo->ld_private.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) /* Encode the iostats devinfo array gathered for this layoutreturn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		const struct nfs4_layoutreturn_args *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		struct nfs4_flexfile_layoutreturn_args *ff_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	__be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	p = xdr_reserve_space(xdr, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	*p = cpu_to_be32(ff_args->num_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	for (i = 0; i < ff_args->num_dev; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		ff_layout_encode_ff_iostat(xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 				&args->layout->plh_stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 				&ff_args->devinfo[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		unsigned int num_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	for (i = 0; i < num_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		if (!devinfo[i].ld_private.ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		if (!devinfo[i].ld_private.ops->free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static struct nfs4_deviceid_node *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) ff_layout_alloc_deviceid_node(struct nfs_server *server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 			      struct pnfs_device *pdev, gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	struct nfs4_ff_layout_ds *dsaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	if (!dsaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	return &dsaddr->id_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		const void *voidargs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		const struct nfs4_xdr_opaque_data *ff_opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	const struct nfs4_layoutreturn_args *args = voidargs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	struct xdr_buf tmp_buf = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		.head = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 			[0] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 				.iov_base = page_address(ff_args->pages[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		.buflen = PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	struct xdr_stream tmp_xdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	__be32 *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	dprintk("%s: Begin\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	start = xdr_reserve_space(xdr, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	*start = cpu_to_be32(tmp_buf.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	dprintk("%s: Return\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
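
/*
 * Editor's note: the lrf_body is encoded in two passes. The ioerr and
 * iostats arrays are first written through tmp_xdr into a scratch page,
 * which yields their total length in tmp_buf.len; that length and the
 * page contents are then appended to the real stream as the opaque body.
 */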
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	struct nfs4_flexfile_layoutreturn_args *ff_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	if (!args->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	ff_args = args->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	args->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	ff_layout_free_ds_ioerr(&ff_args->errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	put_page(ff_args->pages[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	kfree(ff_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	.encode = ff_layout_encode_layoutreturn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	.free = ff_layout_free_layoutreturn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	struct nfs4_flexfile_layoutreturn_args *ff_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	if (!ff_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		goto out_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	ff_args->pages[0] = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	if (!ff_args->pages[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		goto out_nomem_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	INIT_LIST_HEAD(&ff_args->errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 			&args->range, &ff_args->errors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 			FF_LAYOUTRETURN_MAXERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	spin_lock(&args->inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	spin_unlock(&args->inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	args->ld_private->ops = &layoutreturn_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	args->ld_private->data = ff_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) out_nomem_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	kfree(ff_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) out_nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
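
/*
 * Editor's note: pending DS I/O errors are drained from the layout
 * (capped at FF_LAYOUTRETURN_MAXERR) and the per-mirror stats are
 * snapshotted under inode->i_lock; the scratch page allocated here is
 * the one ff_layout_encode_layoutreturn() later encodes into.
 */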
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) #ifdef CONFIG_NFS_V4_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	struct nfs42_layout_error *errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	if (list_empty(&head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 			sizeof(*errors), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	if (errors != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		const struct nfs4_ff_layout_ds_err *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		size_t n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		list_for_each_entry(pos, &head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 			errors[n].offset = pos->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 			errors[n].length = pos->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 			errors[n].errors[0].dev_id = pos->deviceid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 			errors[n].errors[0].status = pos->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			errors[n].errors[0].opnum = pos->opnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 			n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 			if (!list_is_last(&pos->list, &head) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 			    n < NFS42_LAYOUTERROR_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 			n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		kfree(errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	ff_layout_free_ds_ioerr(&head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) #endif
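
/*
 * Editor's note: ff_layout_send_layouterror() batches the queued DS
 * errors into chunks of at most NFS42_LAYOUTERROR_MAX per LAYOUTERROR
 * call and stops early if one of those calls fails; without
 * CONFIG_NFS_V4_2 it compiles to the empty stub above.
 */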
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) static size_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 			  const int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	const struct in6_addr *addr = &sin6->sin6_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	 * RFC 4291, Section 2.2.2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	 * Shorthanded ANY address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	if (ipv6_addr_any(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 		return snprintf(buf, buflen, "::");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	 * RFC 4291, Section 2.2.2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	 * Shorthanded loopback address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	if (ipv6_addr_loopback(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		return snprintf(buf, buflen, "::1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	 * RFC 4291, Section 2.2.3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	 * Special presentation address format for mapped v4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	 * addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	if (ipv6_addr_v4mapped(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		return snprintf(buf, buflen, "::ffff:%pI4",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 					&addr->s6_addr32[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	 * RFC 4291, Section 2.2.1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	return snprintf(buf, buflen, "%pI6c", addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) /* Derived from rpc_sockaddr2uaddr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	char portbuf[RPCBIND_MAXUADDRPLEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	char addrbuf[RPCBIND_MAXUADDRLEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	char *netid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	unsigned short port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	int len, netid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	__be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	switch (sap->sa_family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		netid = "tcp";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		netid_len = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		netid = "tcp6";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		netid_len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 		/* we only support tcp and tcp6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	p = xdr_reserve_space(xdr, 4 + netid_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	xdr_encode_opaque(p, netid, netid_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	p = xdr_reserve_space(xdr, 4 + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	xdr_encode_opaque(p, addrbuf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
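
/*
 * Worked example (illustrative values, editor's note): an AF_INET data
 * server at 192.0.2.7 on port 2049 is encoded as netid "tcp" with
 * universal address "192.0.2.7.8.1", since 2049 >> 8 == 8 and
 * 2049 & 0xff == 1.
 */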
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) ff_layout_encode_nfstime(struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 			 ktime_t t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	struct timespec64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	__be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	p = xdr_reserve_space(xdr, 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	ts = ktime_to_timespec64(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	p = xdr_encode_hyper(p, ts.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	*p++ = cpu_to_be32(ts.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
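/*
 * Encode an ff_io_latency4: five 64-bit counters (ops and bytes
 * requested, ops and bytes completed, bytes not delivered) followed by
 * the busy time and aggregate completion time as nfstime4 values.
 */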
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) ff_layout_encode_io_latency(struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 			    struct nfs4_ff_io_stat *stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	__be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	p = xdr_reserve_space(xdr, 5 * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	p = xdr_encode_hyper(p, stat->ops_requested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	p = xdr_encode_hyper(p, stat->bytes_requested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	p = xdr_encode_hyper(p, stat->ops_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	p = xdr_encode_hyper(p, stat->bytes_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 
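/*
 * Encode the per-mirror layoutupdate body: the first DS address, the
 * first filehandle version, the read and write ff_io_latency4 blocks
 * (sampled under mirror->lock), the time elapsed since start_time, and
 * a trailing boolean that is always encoded as false here.
 */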
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 			      const struct nfs42_layoutstat_devinfo *devinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 			      struct nfs4_ff_layout_mirror *mirror)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	struct nfs4_pnfs_ds_addr *da;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	struct nfs_fh *fh = &mirror->fh_versions[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	__be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	dprintk("%s: DS %s: encoding address %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		__func__, ds->ds_remotestr, da->da_remotestr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	/* netaddr4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	ff_layout_encode_netaddr(xdr, da);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	/* nfs_fh4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	p = xdr_reserve_space(xdr, 4 + fh->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	xdr_encode_opaque(p, fh->data, fh->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	/* ff_io_latency4 read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	spin_lock(&mirror->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	/* ff_io_latency4 write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	spin_unlock(&mirror->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	/* nfstime4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	/* bool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	p = xdr_reserve_space(xdr, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	*p = cpu_to_be32(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
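/*
 * Top-level layoutstats encoder: reserve four bytes for the opaque
 * layoutupdate length, emit the body, then back-fill the length from
 * the number of XDR words consumed.
 */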
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 			     const struct nfs4_xdr_opaque_data *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 			struct nfs42_layoutstat_devinfo, ld_private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	__be32 *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	/* layoutupdate length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	start = xdr_reserve_space(xdr, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	struct nfs4_ff_layout_mirror *mirror = opaque->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	ff_layout_put_mirror(mirror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
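/* ->free drops the mirror reference taken in ff_layout_mirror_prepare_stats() */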
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	.encode = ff_layout_encode_layoutstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	.free	= ff_layout_free_layoutstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
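/*
 * Fill devinfo[] with one entry per mirror that has fresh statistics,
 * up to dev_limit entries.  Each selected mirror contributes its device
 * ID, cumulative I/O counters sampled under mirror->lock, and a mirror
 * reference dropped later by ff_layout_free_layoutstats().  Returns the
 * number of entries filled; called with the inode's i_lock held.
 */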
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 			       struct nfs42_layoutstat_devinfo *devinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 			       int dev_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	struct nfs4_ff_layout_mirror *mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	struct nfs4_deviceid_node *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		if (i >= dev_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 		if (IS_ERR_OR_NULL(mirror->mirror_ds))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		/* mirror refcount put in cleanup_layoutstats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		if (!refcount_inc_not_zero(&mirror->ref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		dev = &mirror->mirror_ds->id_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		devinfo->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		devinfo->length = NFS4_MAX_UINT64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		spin_lock(&mirror->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		spin_unlock(&mirror->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		devinfo->layout_type = LAYOUT_FLEX_FILES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		devinfo->ld_private.ops = &layoutstat_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		devinfo->ld_private.data = mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		devinfo++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 
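/*
 * Build the devinfo array for a LAYOUTSTATS call: returns -ENOMEM if
 * the array cannot be allocated, -ENOENT if no mirror currently has
 * statistics worth reporting.
 */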
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	struct nfs4_flexfile_layout *ff_layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	if (!args->devinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	spin_lock(&args->inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 			&args->devinfo[0], dev_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	spin_unlock(&args->inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	if (!args->num_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 		kfree(args->devinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		args->devinfo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 
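/* LAYOUTSTATS is an NFSv4.2 operation, so advertise it only when 4.2 is built in */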
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) ff_layout_set_layoutdriver(struct nfs_server *server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 		const struct nfs_fh *dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) #if IS_ENABLED(CONFIG_NFS_V4_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	server->caps |= NFS_CAP_LAYOUTSTATS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 
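/*
 * Commit machinery: flexfiles supplies its own DS setup/teardown and
 * pagelist commit, and reuses the generic pNFS helpers for the rest.
 */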
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) static const struct pnfs_commit_ops ff_layout_commit_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	.setup_ds_info		= ff_layout_setup_ds_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	.release_ds_info	= ff_layout_release_ds_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	.mark_request_commit	= pnfs_layout_mark_request_commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	.clear_request_commit	= pnfs_generic_clear_request_commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	.commit_pagelist	= ff_layout_commit_pagelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) static struct pnfs_layoutdriver_type flexfilelayout_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	.id			= LAYOUT_FLEX_FILES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	.name			= "LAYOUT_FLEX_FILES",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	.owner			= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	.flags			= PNFS_LAYOUTGET_ON_OPEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	.max_layoutget_response	= 4096, /* 1 page or so... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	.set_layoutdriver	= ff_layout_set_layoutdriver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	.free_layout_hdr	= ff_layout_free_layout_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	.alloc_lseg		= ff_layout_alloc_lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	.free_lseg		= ff_layout_free_lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	.add_lseg		= ff_layout_add_lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	.pg_read_ops		= &ff_layout_pg_read_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	.pg_write_ops		= &ff_layout_pg_write_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	.get_ds_info		= ff_layout_get_ds_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	.free_deviceid_node	= ff_layout_free_deviceid_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	.read_pagelist		= ff_layout_read_pagelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	.write_pagelist		= ff_layout_write_pagelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	.sync			= pnfs_nfs_generic_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) static int __init nfs4flexfilelayout_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	       __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	return pnfs_register_layoutdriver(&flexfilelayout_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) static void __exit nfs4flexfilelayout_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	       __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 
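/* Flexfiles is pNFS layout type 4 (LAYOUT_FLEX_FILES, RFC 8435) */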
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) MODULE_ALIAS("nfs-layouttype4-4");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) module_init(nfs4flexfilelayout_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) module_exit(nfs4flexfilelayout_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 
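/*
 * io_maxretrans defaults to 0, which leaves the per-request retry count
 * uncapped.  With 0644 permissions it can be tuned at runtime, e.g.
 * (assuming the layout driver is built as the nfs_layout_flexfiles
 * module):
 *
 *   echo 5 > /sys/module/nfs_layout_flexfiles/parameters/io_maxretrans
 */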
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) module_param(io_maxretrans, ushort, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 			"retries an I/O request before returning an error.");