Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  *  pNFS functions to call and manage layout drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *  Copyright (c) 2002 [year of first publication]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *  The Regents of the University of Michigan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *  All Rights Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *  Dean Hildebrand <dhildebz@umich.edu>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *  Permission is granted to use, copy, create derivative works, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *  redistribute this software and such derivative works for any purpose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *  so long as the name of the University of Michigan is not used in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *  any advertising or publicity pertaining to the use or distribution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *  of this software without specific, written prior authorization. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *  the above copyright notice or any other identification of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  *  University of Michigan is included in any copy of any portion of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *  this software, then the disclaimer below must also be included.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *  This software is provided as is, without representation or warranty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  *  of any kind either express or implied, including without limitation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  *  the implied warranties of merchantability, fitness for a particular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  *  purpose, or noninfringement.  The Regents of the University of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  *  Michigan shall not be liable for any damages, including special,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  *  indirect, incidental, or consequential damages, with respect to any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  *  claim arising out of or in connection with the use of the software,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  *  even if it has been or is hereafter advised of the possibility of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  *  such damages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/nfs_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/nfs_page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include "internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include "pnfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include "iostat.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include "nfs4trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include "delegation.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include "nfs42.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include "nfs4_fs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #define NFSDBG_FACILITY		NFSDBG_PNFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) /* Locking:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  * pnfs_spinlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48)  *      protects pnfs_modules_tbl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) static DEFINE_SPINLOCK(pnfs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53)  * pnfs_modules_tbl holds all pnfs modules
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) static LIST_HEAD(pnfs_modules_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 		struct list_head *free_me,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 		const struct pnfs_layout_range *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 		u32 seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 		                struct list_head *tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) /* Return the registered pnfs layout driver module matching given id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) static struct pnfs_layoutdriver_type *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) find_pnfs_driver_locked(u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	struct pnfs_layoutdriver_type *local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 		if (local->id == id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	local = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	return local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) static struct pnfs_layoutdriver_type *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) find_pnfs_driver(u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	struct pnfs_layoutdriver_type *local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	spin_lock(&pnfs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	local = find_pnfs_driver_locked(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 	if (local != NULL && !try_module_get(local->owner)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		dprintk("%s: Could not grab reference on module\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 		local = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	spin_unlock(&pnfs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	return local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 
/*
 * Public lookup of the layout driver for @id; takes a module reference
 * on success. Pair with pnfs_put_layoutdriver().
 */
const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id)
{
	return find_pnfs_driver(id);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	if (ld)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 		module_put(ld->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 
/*
 * Detach the current pNFS layout driver from @nfss: give the driver a
 * chance to clean up, drop this server's MDS count on the client, and
 * release the module reference taken by set_pnfs_layoutdriver().
 */
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121)  * When the server sends a list of layout types, we choose one in the order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122)  * given in the list below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124)  * FIXME: should this list be configurable in some fashion? module param?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125)  * 	  mount option? something else?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126)  */
static const u32 ld_prefs[] = {
	LAYOUT_SCSI,
	LAYOUT_BLOCK_VOLUME,
	LAYOUT_OSD2_OBJECTS,
	LAYOUT_FLEX_FILES,
	LAYOUT_NFSV4_1_FILES,
	0	/* sentinel: terminates the scan in ld_cmp() */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) ld_cmp(const void *e1, const void *e2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	u32 ld1 = *((u32 *)e1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	u32 ld2 = *((u32 *)e2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	for (i = 0; ld_prefs[i] != 0; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 		if (ld1 == ld_prefs[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 		if (ld2 == ld_prefs[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154)  * Try to set the server's pnfs module to the pnfs layout type specified by id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155)  * Currently only one pNFS layout driver per filesystem is supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157)  * @ids array of layout types supported by MDS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158)  */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      struct nfs_fsinfo *fsinfo)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;
	u32 id;
	int i;

	if (fsinfo->nlayouttypes == 0)
		goto out_no_driver;
	/* Sanity check: the EXCHANGE_ID flags must advertise pNFS or non-pNFS use */
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
			__func__, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}

	/* Order the server's advertised layout types by our ld_prefs[] preference */
	sort(fsinfo->layouttype, fsinfo->nlayouttypes,
		sizeof(*fsinfo->layouttype), ld_cmp, NULL);

	/* Take the first type for which a driver is registered or can be loaded */
	for (i = 0; i < fsinfo->nlayouttypes; i++) {
		id = fsinfo->layouttype[i];
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			/* Not registered yet: try to demand-load the module */
			request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
					id);
			ld_type = find_pnfs_driver(id);
		}
		if (ld_type)
			break;
	}

	if (!ld_type) {
		dprintk("%s: No pNFS module found!\n", __func__);
		goto out_no_driver;
	}

	server->pnfs_curr_ld = ld_type;
	/* Give the driver a chance to refuse this filesystem */
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		/* Drop the module reference taken by find_pnfs_driver() */
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	/* Fall back to plain NFSv4 I/O through the MDS */
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	int status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	struct pnfs_layoutdriver_type *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	if (ld_type->id == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		printk(KERN_ERR "NFS: %s Layout driver must provide "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 		       "alloc_lseg and free_lseg.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	spin_lock(&pnfs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	tmp = find_pnfs_driver_locked(ld_type->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	if (!tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 		status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 			ld_type->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 			__func__, ld_type->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	spin_unlock(&pnfs_spinlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
/*
 * Remove a layout driver from pnfs_modules_tbl. Counterpart of
 * pnfs_register_layoutdriver(); typically called at module unload.
 */
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259)  * pNFS client layout cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) /* Need to hold i_lock if caller does not already hold reference */
/* Take an additional reference on the layout header. */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	refcount_inc(&lo->plh_refcount);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) static struct pnfs_layout_hdr *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	return ld->alloc_layout_hdr(ino, gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
/*
 * Free a layout header: if it is still hashed on the client's layout
 * list, unlink it under cl_lock (RCU list), drop the credential
 * reference and hand the header back to the layout driver to free.
 */
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_rcu(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_cred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 
/*
 * Unhook the layout header from its inode; the header itself is freed
 * afterwards by pnfs_free_layout_hdr() (see pnfs_put_layout_hdr()).
 */
static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 
/*
 * Drop a reference on the layout header. When the last reference goes
 * away (detected atomically with taking inode->i_lock), detach the
 * header from the inode and free it.
 */
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode;
	unsigned long i_state;

	if (!lo)
		return;
	inode = lo->plh_inode;
	/* Handle any pending layoutreturn before the header can go away */
	pnfs_layoutreturn_before_put_layout_hdr(lo);

	if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		/* Sample i_state before dropping i_lock and freeing lo */
		i_state = inode->i_state;
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
		/* Notify pnfs_destroy_layout_final() that we're done */
		if (i_state & (I_FREEING | I_CLEAR))
			wake_up_var(lo);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) static struct inode *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	struct inode *inode = igrab(lo->plh_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	if (inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339)  * Compare 2 layout stateid sequence ids, to see which is newer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340)  * taking into account wraparound issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341)  */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	/* Signed difference makes the comparison wraparound-safe */
	return (s32)(s1 - s2) > 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	if (pnfs_seqid_is_newer(newseq, lo->plh_barrier) || !lo->plh_barrier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		lo->plh_barrier = newseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
/*
 * Request a layoutreturn for @iomode up to sequence id @seq (0 means
 * "up to the current stateid seqid"). Widens an already-pending request
 * to IOMODE_ANY when the modes differ.
 * NOTE(review): updates to @lo appear to rely on the caller serializing
 * access (inode->i_lock) — confirm at call sites.
 */
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
			 u32 seq)
{
	if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
		iomode = IOMODE_ANY;
	lo->plh_return_iomode = iomode;
	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	/*
	 * We must set lo->plh_return_seq to avoid livelocks with
	 * pnfs_layout_need_return()
	 */
	if (seq == 0)
		seq = be32_to_cpu(lo->plh_stateid.seqid);
	if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
		lo->plh_return_seq = seq;
	pnfs_barrier_update(lo, seq);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
/*
 * Reset the pending-layoutreturn state, then immediately re-arm a
 * return request for any segment still carrying NFS_LSEG_LAYOUTRETURN.
 */
static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layout_segment *lseg;
	lo->plh_return_iomode = 0;
	lo->plh_return_seq = 0;
	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
			continue;
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 
/*
 * Mark the in-flight layoutreturn as finished and wake everyone
 * waiting on it: bit waiters on NFS_LAYOUT_RETURN and RPC tasks parked
 * on the server's roc_rpcwaitq. The barrier orders the bit clears
 * before the wakeups.
 */
static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 
/*
 * Clear a segment's ROC/layoutreturn markers and drop one reference for
 * each of the VALID and LAYOUTCOMMIT bits that was set, via
 * pnfs_lseg_dec_and_remove_zero() (which collects segments on @free_me).
 */
static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
	clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
	if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408)  * Update the seqid of a layout stateid after receiving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409)  * NFS4ERR_OLD_STATEID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410)  */
/*
 * @dst:       stateid used by the failed call; updated in place
 * @dst_range: range to retry with; may be widened to the full file
 * @inode:     inode whose layout the stateid refers to
 *
 * Returns true if @dst was refreshed and the call is worth retrying.
 */
bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
		struct pnfs_layout_range *dst_range,
		struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	/* Full-file range used when the retry must cover everything */
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	bool ret = false;
	LIST_HEAD(head);
	int err;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	/* Only refresh if the stateid still refers to the current layout */
	if (lo &&  pnfs_layout_is_valid(lo) &&
	    nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
		/* Is our call using the most recent seqid? If so, bump it */
		if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
			nfs4_stateid_seqid_inc(dst);
			ret = true;
			goto out;
		}
		/* Try to update the seqid to the most recent */
		err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
		if (err != -EBUSY) {
			dst->seqid = lo->plh_stateid.seqid;
			*dst_range = range;
			ret = true;
		}
	}
out:
	spin_unlock(&inode->i_lock);
	/* Free any segments collected above, outside the i_lock */
	pnfs_free_lseg_list(&head);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450)  * Mark a pnfs_layout_hdr and all associated layout segments as invalid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452)  * In order to continue using the pnfs_layout_hdr, a full recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453)  * is required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454)  * Note that caller must hold inode->i_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		struct list_head *lseg_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	struct pnfs_layout_range range = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		.iomode = IOMODE_ANY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		.offset = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		.length = NFS4_MAX_UINT64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	struct pnfs_layout_segment *lseg, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		pnfs_clear_lseg_state(lseg, lseg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	pnfs_clear_layoutreturn_info(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	    !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		pnfs_clear_layoutreturn_waitbit(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	return !list_empty(&lo->plh_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) pnfs_iomode_to_fail_bit(u32 iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	return iomode == IOMODE_RW ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	lo->plh_retry_timestamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		refcount_inc(&lo->plh_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		refcount_dec(&lo->plh_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	struct inode *inode = lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	struct pnfs_layout_range range = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		.iomode = iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		.offset = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		.length = NFS4_MAX_UINT64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	pnfs_free_lseg_list(&head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 			iomode == IOMODE_RW ?  "RW" : "READ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	unsigned long start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	int fail_bit = pnfs_iomode_to_fail_bit(iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	if (test_bit(fail_bit, &lo->plh_flags) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	end = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		/* It is time to retry the failed layoutgets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		pnfs_layout_clear_fail_bit(lo, fail_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		const struct pnfs_layout_range *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		const nfs4_stateid *stateid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	INIT_LIST_HEAD(&lseg->pls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	INIT_LIST_HEAD(&lseg->pls_lc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	INIT_LIST_HEAD(&lseg->pls_commits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	refcount_set(&lseg->pls_refcount, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	lseg->pls_layout = lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	lseg->pls_range = *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	lseg->pls_seq = be32_to_cpu(stateid->seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	if (lseg != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		struct inode *inode = lseg->pls_layout->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	list_del_init(&lseg->pls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	refcount_dec(&lo->plh_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	if (list_empty(&lo->plh_segs) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	    !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	    !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		if (atomic_read(&lo->plh_outstanding) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	    pnfs_layout_is_valid(lo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) pnfs_put_lseg(struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	if (!lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		refcount_read(&lseg->pls_refcount),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	lo = lseg->pls_layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	inode = lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 			spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		pnfs_get_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		pnfs_layout_remove_lseg(lo, lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 			lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		pnfs_free_lseg(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) EXPORT_SYMBOL_GPL(pnfs_put_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626)  * is l2 fully contained in l1?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627)  *   start1                             end1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628)  *   [----------------------------------)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629)  *           start2           end2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630)  *           [----------------)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		 const struct pnfs_layout_range *l2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	u64 start1 = l1->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	u64 end1 = pnfs_end_offset(start1, l1->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	u64 start2 = l2->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	u64 end2 = pnfs_end_offset(start2, l2->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	return (start1 <= start2) && (end1 >= end2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		struct list_head *tmp_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	if (!refcount_dec_and_test(&lseg->pls_refcount))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	list_add(&lseg->pls_list, tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) /* Returns 1 if lseg is removed from list, 0 otherwise */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 			     struct list_head *tmp_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	int rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		/* Remove the reference keeping the lseg in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		 * list.  It will now be removed when all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		 * outstanding io is finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 			refcount_read(&lseg->pls_refcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 			rv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		 const struct pnfs_layout_range *recall_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	return (recall_range->iomode == IOMODE_ANY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		lseg_range->iomode == recall_range->iomode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		const struct pnfs_layout_range *recall_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		u32 seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	if (recall_range == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	return pnfs_should_free_range(&lseg->pls_range, recall_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695)  * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696)  * @lo: layout header containing the lsegs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697)  * @tmp_list: list head where doomed lsegs should go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698)  * @recall_range: optional recall range argument to match (may be NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699)  * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701)  * Walk the list of lsegs in the layout header, and tear down any that should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702)  * be destroyed. If "recall_range" is specified then the segment must match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703)  * that range. If "seq" is non-zero, then only match segments that were handed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704)  * out at or before that sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706)  * Returns number of matching invalid lsegs remaining in list after scanning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707)  * it and purging them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 			    struct list_head *tmp_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			    const struct pnfs_layout_range *recall_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			    u32 seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	struct pnfs_layout_segment *lseg, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	int remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	dprintk("%s:Begin lo %p\n", __func__, lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	if (list_empty(&lo->plh_segs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			dprintk("%s: freeing lseg %p iomode %d seq %u "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 				"offset %llu length %llu\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 				lseg, lseg->pls_range.iomode, lseg->pls_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 				lseg->pls_range.offset, lseg->pls_range.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 			if (!mark_lseg_invalid(lseg, tmp_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 				remaining++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	dprintk("%s:Return %i\n", __func__, remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	return remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		struct list_head *free_me,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		const struct pnfs_layout_range *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		u32 seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	struct pnfs_layout_segment *lseg, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		if (pnfs_match_lseg_recall(lseg, range, seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 			list_move_tail(&lseg->pls_list, free_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) /* note free_me must contain lsegs from a single layout_hdr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) pnfs_free_lseg_list(struct list_head *free_me)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	struct pnfs_layout_segment *lseg, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	if (list_empty(free_me))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		list_del(&lseg->pls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		pnfs_free_lseg(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	LIST_HEAD(tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	spin_lock(&nfsi->vfs_inode.i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	lo = nfsi->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	if (lo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		pnfs_get_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		spin_unlock(&nfsi->vfs_inode.i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		pnfs_free_lseg_list(&tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		nfs_commit_inode(&nfsi->vfs_inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		spin_unlock(&nfsi->vfs_inode.i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	return lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) void pnfs_destroy_layout(struct nfs_inode *nfsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	__pnfs_destroy_layout(nfsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) static bool pnfs_layout_removed(struct nfs_inode *nfsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 				struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	spin_lock(&nfsi->vfs_inode.i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	ret = nfsi->layout != lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	spin_unlock(&nfsi->vfs_inode.i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) pnfs_layout_add_bulk_destroy_list(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		struct list_head *layout_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	lo = NFS_I(inode)->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		pnfs_get_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		list_add(&lo->plh_bulk_destroy, layout_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) /* Caller must hold rcu_read_lock and clp->cl_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		struct nfs_server *server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		struct list_head *layout_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	__must_hold(&clp->cl_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	__must_hold(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	struct pnfs_layout_hdr *lo, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		    test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		    !list_empty(&lo->plh_bulk_destroy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		/* If the sb is being destroyed, just bail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		if (!nfs_sb_active(server->super))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		inode = pnfs_grab_inode_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		if (inode != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 				list_del_rcu(&lo->plh_layouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			if (pnfs_layout_add_bulk_destroy_list(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 						layout_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		nfs_sb_deactive(server->super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		bool is_bulk_recall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	LIST_HEAD(lseg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	while (!list_empty(layout_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				plh_bulk_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		dprintk("%s freeing layout for inode %lu\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			lo->plh_inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		inode = lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		pnfs_layoutcommit_inode(inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		list_del_init(&lo->plh_bulk_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			if (is_bulk_recall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 				set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		pnfs_free_lseg_list(&lseg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		/* Free all lsegs that are attached to commit buckets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		nfs_commit_inode(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		nfs_iput_and_deactive(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		struct nfs_fsid *fsid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		bool is_recall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	struct nfs_server *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	LIST_HEAD(layout_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 				server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 				&layout_list) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (list_empty(&layout_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) pnfs_destroy_layouts_byclid(struct nfs_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		bool is_recall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	struct nfs_server *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	LIST_HEAD(layout_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 					server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 					&layout_list) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	if (list_empty(&layout_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  * Called by the state manager to remove all layouts established under an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  * expired lease.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) pnfs_destroy_all_layouts(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	nfs4_deviceid_mark_client_invalid(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	nfs4_deviceid_purge_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	pnfs_destroy_layouts_byclid(clp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) pnfs_set_layout_cred(struct pnfs_layout_hdr *lo, const struct cred *cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	const struct cred *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	if (cred && cred_fscmp(lo->plh_lc_cred, cred) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		old = xchg(&lo->plh_lc_cred, get_cred(cred));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		put_cred(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) /* update lo->plh_stateid with new if is more recent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			const struct cred *cred, bool update_barrier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	u32 oldseq = be32_to_cpu(lo->plh_stateid.seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	u32 newseq = be32_to_cpu(new->seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	if (!pnfs_layout_is_valid(lo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		pnfs_set_layout_cred(lo, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		nfs4_stateid_copy(&lo->plh_stateid, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		lo->plh_barrier = newseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		pnfs_clear_layoutreturn_info(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	if (pnfs_seqid_is_newer(newseq, oldseq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		nfs4_stateid_copy(&lo->plh_stateid, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	if (update_barrier) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		pnfs_barrier_update(lo, newseq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 * Because of wraparound, we want to keep the barrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 * "close" to the current seqids. We really only want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * get here from a layoutget call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	if (atomic_read(&lo->plh_outstanding) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		 pnfs_barrier_update(lo, be32_to_cpu(lo->plh_stateid.seqid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		const nfs4_stateid *stateid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	u32 seqid = be32_to_cpu(stateid->seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	return lo->plh_barrier && pnfs_seqid_is_newer(lo->plh_barrier, seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /* lget is set to 1 if called from inside send_layoutget call chain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	return lo->plh_block_lgets ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static struct nfs_server *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) pnfs_find_server(struct inode *inode, struct nfs_open_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	struct nfs_server *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		server = NFS_SERVER(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		struct dentry *parent_dir = dget_parent(ctx->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		server = NFS_SERVER(parent_dir->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		dput(parent_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	return server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static void nfs4_free_pages(struct page **pages, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		if (!pages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		__free_page(pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	if (!pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		pages[i] = alloc_page(gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		if (!pages[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			dprintk("%s: failed to allocate page\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			nfs4_free_pages(pages, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	return pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) static struct nfs4_layoutget *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) pnfs_alloc_init_layoutget_args(struct inode *ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	   struct nfs_open_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	   const nfs4_stateid *stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	   const struct pnfs_layout_range *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	   gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	struct nfs_server *server = pnfs_find_server(ino, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	size_t max_reply_sz = server->pnfs_curr_ld->max_layoutget_response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	size_t max_pages = max_response_pages(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	struct nfs4_layoutget *lgp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	dprintk("--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	lgp = kzalloc(sizeof(*lgp), gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	if (lgp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (max_reply_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		if (npages < max_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			max_pages = npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	if (!lgp->args.layout.pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		kfree(lgp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	lgp->res.layoutp = &lgp->args.layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	/* Don't confuse uninitialised result and success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	lgp->res.status = -NFS4ERR_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	lgp->args.minlength = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	if (lgp->args.minlength > range->length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		lgp->args.minlength = range->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	if (ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		loff_t i_size = i_size_read(ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		if (range->iomode == IOMODE_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			if (range->offset >= i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 				lgp->args.minlength = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			else if (i_size - range->offset < lgp->args.minlength)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 				lgp->args.minlength = i_size - range->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	pnfs_copy_range(&lgp->args.range, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	lgp->args.type = server->pnfs_curr_ld->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	lgp->args.inode = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	lgp->args.ctx = get_nfs_open_context(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	nfs4_stateid_copy(&lgp->args.stateid, stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	lgp->gfp_flags = gfp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	lgp->cred = ctx->cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	return lgp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	nfs4_free_pages(lgp->args.layout.pages, max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	if (lgp->args.inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	put_nfs_open_context(lgp->args.ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	kfree(lgp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static void pnfs_clear_layoutcommit(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	struct nfs_inode *nfsi = NFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	struct pnfs_layout_segment *lseg, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		pnfs_lseg_dec_and_remove_zero(lseg, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		const nfs4_stateid *arg_stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		const struct pnfs_layout_range *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		const nfs4_stateid *stateid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	struct inode *inode = lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	LIST_HEAD(freeme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	    !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (stateid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		u32 seq = be32_to_cpu(arg_stateid->seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		pnfs_free_returned_lsegs(lo, &freeme, range, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		pnfs_set_layout_stateid(lo, stateid, NULL, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		pnfs_mark_layout_stateid_invalid(lo, &freeme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	pnfs_clear_layoutreturn_waitbit(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	pnfs_free_lseg_list(&freeme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		nfs4_stateid *stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		const struct cred **cred,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		enum pnfs_iomode *iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	/* Serialise LAYOUTGET/LAYOUTRETURN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	if (atomic_read(&lo->plh_outstanding) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	pnfs_get_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	nfs4_stateid_copy(stateid, &lo->plh_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	*cred = get_cred(lo->plh_lc_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		if (lo->plh_return_seq != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			stateid->seqid = cpu_to_be32(lo->plh_return_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		if (iomode != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			*iomode = lo->plh_return_iomode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		pnfs_clear_layoutreturn_info(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	} else if (iomode != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		*iomode = IOMODE_ANY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	pnfs_barrier_update(lo, be32_to_cpu(stateid->seqid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		const nfs4_stateid *stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		enum pnfs_iomode iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	struct inode *inode = lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	args->inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	args->range.iomode = iomode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	args->range.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	args->range.length = NFS4_MAX_UINT64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	args->layout = lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	nfs4_stateid_copy(&args->stateid, stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		       const nfs4_stateid *stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		       const struct cred **pcred,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		       enum pnfs_iomode iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		       bool sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	struct inode *ino = lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	struct nfs4_layoutreturn *lrp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	const struct cred *cred = *pcred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	*pcred = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	if (unlikely(lrp == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		spin_lock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		pnfs_clear_layoutreturn_waitbit(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		put_cred(cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	lrp->args.ld_private = &lrp->ld_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	lrp->clp = NFS_SERVER(ino)->nfs_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	lrp->cred = cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (ld->prepare_layoutreturn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		ld->prepare_layoutreturn(&lrp->args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	status = nfs4_proc_layoutreturn(lrp, sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	dprintk("<-- %s status: %d\n", __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 				enum pnfs_iomode iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 				u32 seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	struct pnfs_layout_range recall_range = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		.length = NFS4_MAX_UINT64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		.iomode = iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 					       &recall_range, seq) != -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /* Return true if layoutreturn is needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 					       lo->plh_return_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	struct inode *inode= lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	if (pnfs_layout_need_return(lo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		nfs4_stateid stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		enum pnfs_iomode iomode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		bool send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		if (send) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 			/* Send an async layoutreturn so we dont deadlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  * when the layout segment list is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  * Note that a pnfs_layout_hdr can exist with an empty layout segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  * deviceid is marked invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) _pnfs_return_layout(struct inode *ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	struct pnfs_layout_hdr *lo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	struct nfs_inode *nfsi = NFS_I(ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	struct pnfs_layout_range range = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		.iomode		= IOMODE_ANY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		.offset		= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		.length		= NFS4_MAX_UINT64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	LIST_HEAD(tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	nfs4_stateid stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	bool send, valid_layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	spin_lock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	lo = nfsi->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	if (!lo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		dprintk("NFS: %s no layout to return\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	/* Reference matched in nfs4_layoutreturn_release */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	pnfs_get_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	/* Is there an outstanding layoutreturn ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 					TASK_UNINTERRUPTIBLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			goto out_put_layout_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		spin_lock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	valid_layout = pnfs_layout_is_valid(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	pnfs_clear_layoutcommit(ino, &tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	/* Don't send a LAYOUTRETURN if list was initially empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			!valid_layout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		dprintk("NFS: %s no layout segments to return\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		goto out_wait_layoutreturn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	if (send)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) out_wait_layoutreturn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) out_put_layout_hdr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	pnfs_free_lseg_list(&tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	dprintk("<-- %s status: %d\n", __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) pnfs_commit_and_return_layout(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	lo = NFS_I(inode)->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	if (lo == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	pnfs_get_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	/* Block new layoutgets and read/write to ds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	lo->plh_block_lgets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	filemap_fdatawait(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	ret = pnfs_layoutcommit_inode(inode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		ret = _pnfs_return_layout(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	lo->plh_block_lgets--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) bool pnfs_roc(struct inode *ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		struct nfs4_layoutreturn_args *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		struct nfs4_layoutreturn_res *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		const struct cred *cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	struct nfs_inode *nfsi = NFS_I(ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	struct nfs_open_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	struct nfs4_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	struct pnfs_layout_segment *lseg, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	const struct cred *lc_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	nfs4_stateid stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	enum pnfs_iomode iomode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	bool layoutreturn = false, roc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	bool skip_read = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	if (!nfs_have_layout(ino))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	spin_lock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	lo = nfsi->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	if (!lo || !pnfs_layout_is_valid(lo) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		lo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		goto out_noroc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	pnfs_get_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 				TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	/* no roc if we hold a delegation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	if (nfs4_check_delegation(ino, FMODE_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		if (nfs4_check_delegation(ino, FMODE_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			goto out_noroc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		skip_read = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		state = ctx->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		if (state == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		/* Don't return layout if there is open file state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		if (state->state & FMODE_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 			goto out_noroc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		if (state->state & FMODE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			skip_read = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		/* If we are sending layoutreturn, invalidate all valid lsegs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		 * Note: mark lseg for return so pnfs_layout_remove_lseg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		 * doesn't invalidate the layout for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		goto out_noroc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	/* ROC in two conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	 * 1. there are ROC lsegs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	 * 2. we don't send layoutreturn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	/* lo ref dropped in pnfs_roc_release() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	/* If the creds don't match, we can't compound the layoutreturn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	if (!layoutreturn || cred_fscmp(cred, lc_cred) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		goto out_noroc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	roc = layoutreturn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	res->lrs_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	layoutreturn = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	put_cred(lc_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) out_noroc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	pnfs_layoutcommit_inode(ino, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	if (roc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		if (ld->prepare_layoutreturn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 			ld->prepare_layoutreturn(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	if (layoutreturn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		  struct nfs4_layoutreturn_res **respp, int *ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	struct nfs4_layoutreturn_args *arg = *argpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	int retval = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	if (!arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	/* Handle Layoutreturn errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	switch (*ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	case -NFS4ERR_NOMATCHING_LAYOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		/* Was there an RPC level error? If not, retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		if (task->tk_rpc_status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		/* If the call was not sent, let caller handle it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		if (!RPC_WAS_SENT(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		 * Otherwise, assume the call succeeded and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		 * that we need to release the layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		*ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		(*respp)->lrs_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	case -NFS4ERR_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		/* Let the caller handle the retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	case -NFS4ERR_OLD_STATEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 						     &arg->range, arg->inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	*argpp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	*respp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		struct nfs4_layoutreturn_res *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		int ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	struct pnfs_layout_hdr *lo = args->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	struct inode *inode = args->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	const nfs4_stateid *arg_stateid = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	const nfs4_stateid *res_stateid = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	case -NFS4ERR_NOMATCHING_LAYOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		if (pnfs_layout_is_valid(lo) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		    nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 			pnfs_set_plh_return_info(lo, args->range.iomode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		if (res->lrs_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 			res_stateid = &res->stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		arg_stateid = &args->stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 			res_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (ld_private && ld_private->ops && ld_private->ops->free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		ld_private->ops->free(ld_private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	struct nfs_inode *nfsi = NFS_I(ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)         struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)         bool sleep = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	/* we might not have grabbed lo reference. so need to check under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	 * i_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)         spin_lock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)         lo = nfsi->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)         if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)                 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)                 sleep = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)         spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)         return sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)  * Compare two layout segments for sorting into layout cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)  * We want to preferentially return RW over RO layouts, so ensure those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)  * are seen first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) static s64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	   const struct pnfs_layout_range *l2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	s64 d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	/* high offset > low offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	d = l1->offset - l2->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	if (d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	/* short length > long length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	d = l2->length - l1->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	if (d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	/* read > read/write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		const struct pnfs_layout_range *l2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	return pnfs_lseg_range_cmp(l1, l2) > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		struct pnfs_layout_segment *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		   struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		   bool (*is_after)(const struct pnfs_layout_range *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 			   const struct pnfs_layout_range *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		   bool (*do_merge)(struct pnfs_layout_segment *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 			   struct pnfs_layout_segment *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		   struct list_head *free_me)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	struct pnfs_layout_segment *lp, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	dprintk("%s:Begin\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		if (do_merge(lseg, lp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 			mark_lseg_invalid(lp, free_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		if (is_after(&lseg->pls_range, &lp->pls_range))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		list_add_tail(&lseg->pls_list, &lp->pls_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		dprintk("%s: inserted lseg %p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			"iomode %d offset %llu length %llu before "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 			"lp %p iomode %d offset %llu length %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			__func__, lseg, lseg->pls_range.iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			lseg->pls_range.offset, lseg->pls_range.length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			lp, lp->pls_range.iomode, lp->pls_range.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 			lp->pls_range.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	list_add_tail(&lseg->pls_list, &lo->plh_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	dprintk("%s: inserted lseg %p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		"iomode %d offset %llu length %llu at tail\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		__func__, lseg, lseg->pls_range.iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		lseg->pls_range.offset, lseg->pls_range.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	pnfs_get_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	dprintk("%s:Return\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		   struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		   struct list_head *free_me)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	struct inode *inode = lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	if (ld->add_lseg != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		ld->add_lseg(lo, lseg, free_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		pnfs_generic_layout_insert_lseg(lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 				pnfs_lseg_range_is_after,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 				pnfs_lseg_no_merge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 				free_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) static struct pnfs_layout_hdr *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) alloc_init_layout_hdr(struct inode *ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		      struct nfs_open_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		      gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	if (!lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	refcount_set(&lo->plh_refcount, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	INIT_LIST_HEAD(&lo->plh_layouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	INIT_LIST_HEAD(&lo->plh_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	INIT_LIST_HEAD(&lo->plh_return_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	lo->plh_inode = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	lo->plh_lc_cred = get_cred(ctx->cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	return lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) static struct pnfs_layout_hdr *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) pnfs_find_alloc_layout(struct inode *ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		       struct nfs_open_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		       gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	__releases(&ino->i_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	__acquires(&ino->i_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	struct nfs_inode *nfsi = NFS_I(ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	struct pnfs_layout_hdr *new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	if (nfsi->layout != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		goto out_existing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	spin_lock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		nfsi->layout = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	} else if (new != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		pnfs_free_layout_hdr(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) out_existing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	pnfs_get_layout_hdr(nfsi->layout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	return nfsi->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)  * iomode matching rules:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)  * iomode	lseg	strict match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)  *                      iomode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)  * -----	-----	------ -----
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)  * ANY		READ	N/A    true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)  * ANY		RW	N/A    true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)  * RW		READ	N/A    false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)  * RW		RW	N/A    true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)  * READ		READ	N/A    true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)  * READ		RW	true   false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)  * READ		RW	false  true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		 const struct pnfs_layout_range *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		 bool strict_iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	struct pnfs_layout_range range1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	if ((range->iomode == IOMODE_RW &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	     ls_range->iomode != IOMODE_RW) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	    (range->iomode != ls_range->iomode &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	     strict_iomode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	    !pnfs_lseg_range_intersecting(ls_range, range))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	/* range1 covers only the first byte in the range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	range1 = *range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	range1.length = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	return pnfs_lseg_range_contained(ls_range, &range1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  * lookup range in layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) static struct pnfs_layout_segment *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) pnfs_find_lseg(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		struct pnfs_layout_range *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		bool strict_iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	struct pnfs_layout_segment *lseg, *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	dprintk("%s:Begin\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		    pnfs_lseg_range_match(&lseg->pls_range, range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 					  strict_iomode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 			ret = pnfs_get_lseg(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	dprintk("%s:Return lseg %p ref %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		__func__, ret, ret ? refcount_read(&ret->pls_refcount) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)  * Use mdsthreshold hints set at each OPEN to determine if I/O should go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)  * to the MDS or over pNFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)  * The nfs_inode read_io and write_io fields are cumulative counters reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)  * when there are no layout segments. Note that in pnfs_update_layout iomode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)  * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)  * WRITE request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)  * A return of true means use MDS I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)  * From rfc 5661:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)  * If a file's size is smaller than the file size threshold, data accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)  * SHOULD be sent to the metadata server.  If an I/O request has a length that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)  * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)  * server.  If both file size and I/O size are provided, the client SHOULD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)  * reach or exceed  both thresholds before sending its read or write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)  * requests to the data server.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 				     struct inode *ino, int iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	struct nfs4_threshold *t = ctx->mdsthreshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	struct nfs_inode *nfsi = NFS_I(ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	loff_t fsize = i_size_read(ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	bool size = false, size_set = false, io = false, io_set = false, ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	if (t == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	switch (iomode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	case IOMODE_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		if (t->bm & THRESHOLD_RD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			dprintk("%s fsize %llu\n", __func__, fsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 			size_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 			if (fsize < t->rd_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 				size = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		if (t->bm & THRESHOLD_RD_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			dprintk("%s nfsi->read_io %llu\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 				nfsi->read_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 			io_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 			if (nfsi->read_io < t->rd_io_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 				io = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	case IOMODE_RW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		if (t->bm & THRESHOLD_WR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 			dprintk("%s fsize %llu\n", __func__, fsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 			size_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			if (fsize < t->wr_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 				size = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		if (t->bm & THRESHOLD_WR_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 			dprintk("%s nfsi->write_io %llu\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 				nfsi->write_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 			io_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 			if (nfsi->write_io < t->wr_io_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 				io = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	if (size_set && io_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		if (size && io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 			ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	} else if (size || io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	 * send layoutcommit as it can hold up layoutreturn due to lseg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	 * reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	pnfs_layoutcommit_inode(lo->plh_inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 				   nfs_wait_bit_killable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 				   TASK_KILLABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	atomic_inc(&lo->plh_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	if (atomic_dec_and_test(&lo->plh_outstanding))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		wake_up_var(&lo->plh_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	unsigned long *bitlock = &lo->plh_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) static void _add_to_server_list(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 				struct nfs_server *server)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	if (!test_and_set_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		struct nfs_client *clp = server->nfs_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		/* The lo must be on the clp list if there is any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		list_add_tail_rcu(&lo->plh_layouts, &server->layouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)  * Layout segment is retreived from the server if not cached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)  * The appropriate layout segment is referenced and returned to the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) struct pnfs_layout_segment *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) pnfs_update_layout(struct inode *ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		   struct nfs_open_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		   loff_t pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		   u64 count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		   enum pnfs_iomode iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		   bool strict_iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		   gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	struct pnfs_layout_range arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		.iomode = iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		.offset = pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		.length = count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	unsigned pg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	struct nfs_server *server = NFS_SERVER(ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	struct nfs_client *clp = server->nfs_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	struct pnfs_layout_hdr *lo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	struct pnfs_layout_segment *lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	struct nfs4_layoutget *lgp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	nfs4_stateid stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	long timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	bool first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 				 PNFS_UPDATE_LAYOUT_NO_PNFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 				 PNFS_UPDATE_LAYOUT_MDSTHRESH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) lookup_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	if (IS_ERR(lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	first = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	spin_lock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	if (lo == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 				 PNFS_UPDATE_LAYOUT_NOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	/* Do we even need to bother with this? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 				 PNFS_UPDATE_LAYOUT_BULK_RECALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 		dprintk("%s matches recall, use MDS\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	/* if LAYOUTGET already failed once we don't try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	if (pnfs_layout_io_test_failed(lo, iomode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 				 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	 * If the layout segment list is empty, but there are outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	 * layoutget calls, then they might be subject to a layoutrecall.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	if ((list_empty(&lo->plh_segs) || !pnfs_layout_is_valid(lo)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	    atomic_read(&lo->plh_outstanding) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 					!atomic_read(&lo->plh_outstanding)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		if (IS_ERR(lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 			goto out_put_layout_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		goto lookup_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	 * Because we free lsegs when sending LAYOUTRETURN, we need to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	 * for LAYOUTRETURN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		dprintk("%s wait for layoutreturn\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		if (!IS_ERR(lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 			dprintk("%s retrying\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 						 lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 						 PNFS_UPDATE_LAYOUT_RETRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 			goto lookup_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 					 PNFS_UPDATE_LAYOUT_RETURN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		goto out_put_layout_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	if (lseg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 				PNFS_UPDATE_LAYOUT_FOUND_CACHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	 * Choose a stateid for the LAYOUTGET. If we don't have a layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	 * stateid, or it has been invalidated, then we must use the open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	 * stateid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		 * The first layoutget for the file. Need to serialize per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		 * RFC 5661 Errata 3208.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 				     &lo->plh_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 			spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 			lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 						NFS_LAYOUT_FIRST_LAYOUTGET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 						TASK_KILLABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 			if (IS_ERR(lseg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 				goto out_put_layout_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 			pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 			dprintk("%s retrying\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 			goto lookup_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		first = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		status = nfs4_select_rw_stateid(ctx->state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 					iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 					NULL, &stateid, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 			lseg = ERR_PTR(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 			trace_pnfs_update_layout(ino, pos, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 					iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 					PNFS_UPDATE_LAYOUT_INVALID_OPEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 			nfs4_schedule_stateid_recovery(server, ctx->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 			pnfs_clear_first_layoutget(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 			pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 			goto lookup_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		spin_lock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	if (pnfs_layoutgets_blocked(lo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 				PNFS_UPDATE_LAYOUT_BLOCKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	nfs_layoutget_begin(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	_add_to_server_list(lo, server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	pg_offset = arg.offset & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	if (pg_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		arg.offset -= pg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		arg.length += pg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	if (arg.length != NFS4_MAX_UINT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		arg.length = PAGE_ALIGN(arg.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	if (!lgp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 					 PNFS_UPDATE_LAYOUT_NOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		nfs_layoutget_end(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		goto out_put_layout_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	lseg = nfs4_proc_layoutget(lgp, &timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	nfs_layoutget_end(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	if (IS_ERR(lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		switch(PTR_ERR(lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 		case -EBUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 			if (time_after(jiffies, giveup))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 				lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 		case -ERECALLCONFLICT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 			if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 				pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 				lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 			goto out_put_layout_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		if (lseg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 			if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 				pnfs_clear_first_layoutget(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 			trace_pnfs_update_layout(ino, pos, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 				iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 			pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 			goto lookup_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) out_put_layout_hdr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		pnfs_clear_first_layoutget(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 				 PNFS_UPDATE_LAYOUT_EXIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 			"(%s, offset: %llu, length: %llu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 			__func__, ino->i_sb->s_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 			(unsigned long long)NFS_FILEID(ino),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 			IS_ERR_OR_NULL(lseg) ? "not found" : "found",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 			iomode==IOMODE_RW ?  "read/write" : "read-only",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 			(unsigned long long)pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 			(unsigned long long)count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	return lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	goto out_put_layout_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) EXPORT_SYMBOL_GPL(pnfs_update_layout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	switch (range->iomode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	case IOMODE_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	case IOMODE_RW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	if (range->offset == NFS4_MAX_UINT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	if (range->length == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	if (range->length != NFS4_MAX_UINT64 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	    range->length > NFS4_MAX_UINT64 - range->offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) static struct pnfs_layout_hdr *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) _pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	spin_lock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	lo = pnfs_find_alloc_layout(ino, ctx, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	if (!lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	if (pnfs_layoutgets_blocked(lo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	nfs_layoutget_begin(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	_add_to_server_list(lo, NFS_SERVER(ino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	return lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) static void _lgopen_prepare_attached(struct nfs4_opendata *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 				     struct nfs_open_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	struct inode *ino = data->dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	struct pnfs_layout_range rng = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 			  IOMODE_RW: IOMODE_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		.offset = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		.length = NFS4_MAX_UINT64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	struct nfs4_layoutget *lgp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	/* Heuristic: don't send layoutget if we have cached data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	if (rng.iomode == IOMODE_READ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	   (i_size_read(ino) == 0 || ino->i_mapping->nrpages != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	lo = _pnfs_grab_empty_layout(ino, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	if (!lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 					     &rng, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	if (!lgp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		pnfs_clear_first_layoutget(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		nfs_layoutget_end(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		pnfs_put_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	data->lgp = lgp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	data->o_arg.lg_args = &lgp->args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	data->o_res.lg_res = &lgp->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) static void _lgopen_prepare_floating(struct nfs4_opendata *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 				     struct nfs_open_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	struct pnfs_layout_range rng = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 			  IOMODE_RW: IOMODE_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		.offset = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 		.length = NFS4_MAX_UINT64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	struct nfs4_layoutget *lgp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	lgp = pnfs_alloc_init_layoutget_args(NULL, ctx, &current_stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 					     &rng, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	if (!lgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	data->lgp = lgp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	data->o_arg.lg_args = &lgp->args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	data->o_res.lg_res = &lgp->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) void pnfs_lgopen_prepare(struct nfs4_opendata *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 			 struct nfs_open_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	if (!(pnfs_enabled_sb(server) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	      server->pnfs_curr_ld->flags & PNFS_LAYOUTGET_ON_OPEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	/* Could check on max_ops, but currently hardcoded high enough */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	if (!nfs_server_capable(data->dir->d_inode, NFS_CAP_LGOPEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	if (data->state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		_lgopen_prepare_attached(data, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 		_lgopen_prepare_floating(data, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		       struct nfs_open_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	struct pnfs_layout_segment *lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	struct nfs_server *srv = NFS_SERVER(ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	u32 iomode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	if (!lgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	dprintk("%s: entered with status %i\n", __func__, lgp->res.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	if (lgp->res.status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		switch (lgp->res.status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		 * Halt lgopen attempts if the server doesn't recognise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		 * the "current stateid" value, the layout type, or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		 * layoutget operation as being valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		 * Also if it complains about too many ops in the compound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		 * or of the request/reply being too big.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		case -NFS4ERR_BAD_STATEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		case -NFS4ERR_NOTSUPP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		case -NFS4ERR_REP_TOO_BIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		case -NFS4ERR_REP_TOO_BIG_TO_CACHE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		case -NFS4ERR_REQ_TOO_BIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		case -NFS4ERR_TOO_MANY_OPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 			srv->caps &= ~NFS_CAP_LGOPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	if (!lgp->args.inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		lo = _pnfs_grab_empty_layout(ino, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		if (!lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		lgp->args.inode = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		lo = NFS_I(lgp->args.inode)->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	lseg = pnfs_layout_process(lgp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	if (!IS_ERR(lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		iomode = lgp->args.range.iomode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		pnfs_put_lseg(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	if (lgp != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		struct inode *inode = lgp->args.inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		if (inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 			struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 			pnfs_clear_first_layoutget(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 			nfs_layoutget_end(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		pnfs_layoutget_free(lgp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) struct pnfs_layout_segment *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) pnfs_layout_process(struct nfs4_layoutget *lgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	struct nfs4_layoutget_res *res = &lgp->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	struct pnfs_layout_segment *lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	struct inode *ino = lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	LIST_HEAD(free_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	if (!pnfs_sanity_check_layout_range(&res->range))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	/* Inject layout blob into I/O device driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	if (IS_ERR_OR_NULL(lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		if (!lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 			lseg = ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		dprintk("%s: Could not allocate layout: error %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		       __func__, PTR_ERR(lseg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		return lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	spin_lock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	if (pnfs_layoutgets_blocked(lo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		dprintk("%s forget reply due to state\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		goto out_forget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	if (!pnfs_layout_is_valid(lo) && !pnfs_is_first_layoutget(lo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		goto out_forget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		/* existing state ID, make sure the sequence number matches. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 			if (!pnfs_layout_is_valid(lo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 				lo->plh_barrier = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 			dprintk("%s forget reply due to sequence\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 			goto out_forget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	} else if (pnfs_layout_is_valid(lo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		 * We got an entirely new state ID.  Mark all segments for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 		 * inode invalid, and retry the layoutget
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		struct pnfs_layout_range range = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 			.iomode = IOMODE_ANY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 			.length = NFS4_MAX_UINT64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		pnfs_set_plh_return_info(lo, IOMODE_ANY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 						&range, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		goto out_forget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		/* We have a completely new layout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	pnfs_get_lseg(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	pnfs_layout_insert_lseg(lo, lseg, &free_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	if (res->return_on_close)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	pnfs_free_lseg_list(&free_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	return lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) out_forget:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	spin_unlock(&ino->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	lseg->pls_layout = lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	pnfs_free_lseg_list(&free_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	return ERR_PTR(-EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)  * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)  * @lo: pointer to layout header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)  * @tmp_list: list header to be used with pnfs_free_lseg_list()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)  * @return_range: describe layout segment ranges to be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)  * @seq: stateid seqid to match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)  * This function is mainly intended for use by layoutrecall. It attempts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)  * to free the layout segment immediately, or else to mark it for return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)  * as soon as its reference count drops to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)  * Returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)  * - 0: a layoutreturn needs to be scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)  * - EBUSY: there are layout segment that are still in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)  * - ENOENT: there are no layout segments that need to be returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 				struct list_head *tmp_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 				const struct pnfs_layout_range *return_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 				u32 seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	struct pnfs_layout_segment *lseg, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	int remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	dprintk("%s:Begin lo %p\n", __func__, lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	assert_spin_locked(&lo->plh_inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 		tmp_list = &lo->plh_return_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 			dprintk("%s: marking lseg %p iomode %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 				"offset %llu length %llu\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 				lseg, lseg->pls_range.iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 				lseg->pls_range.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 				lseg->pls_range.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 			if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 				tmp_list = &lo->plh_return_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 			if (mark_lseg_invalid(lseg, tmp_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 			remaining++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	if (remaining) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		pnfs_set_plh_return_info(lo, return_range->iomode, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	if (!list_empty(&lo->plh_return_segs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 		pnfs_set_plh_return_info(lo, return_range->iomode, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) pnfs_mark_layout_for_return(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 			    const struct pnfs_layout_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	bool return_now = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	lo = NFS_I(inode)->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	if (!pnfs_layout_is_valid(lo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	pnfs_set_plh_return_info(lo, range->iomode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	 * mark all matching lsegs so that we are sure to have no live
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	 * for how it works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, range, 0) != -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		nfs4_stateid stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 		enum pnfs_iomode iomode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 		return_now = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		if (return_now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 			pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		nfs_commit_inode(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) void pnfs_error_mark_layout_for_return(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 				       struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	struct pnfs_layout_range range = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		.iomode = lseg->pls_range.iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		.offset = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		.length = NFS4_MAX_UINT64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	pnfs_mark_layout_for_return(inode, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) pnfs_layout_can_be_returned(struct pnfs_layout_hdr *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	return pnfs_layout_is_valid(lo) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		!test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		!test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) static struct pnfs_layout_segment *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) pnfs_find_first_lseg(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		     const struct pnfs_layout_range *range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 		     enum pnfs_iomode iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	struct pnfs_layout_segment *lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 		if (!test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 		if (lseg->pls_range.iomode != iomode && iomode != IOMODE_ANY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		if (pnfs_lseg_range_intersecting(&lseg->pls_range, range))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 			return lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) /* Find open file states whose mode matches that of the range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 				 const struct pnfs_layout_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	struct list_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	struct nfs_open_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	fmode_t mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	if (!pnfs_layout_can_be_returned(lo) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	    !pnfs_find_first_lseg(lo, range, range->iomode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	head = &NFS_I(lo->plh_inode)->open_files;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	list_for_each_entry_rcu(ctx, head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		if (ctx->state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 			mode |= ctx->state->state & (FMODE_READ|FMODE_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	switch (range->iomode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	case IOMODE_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		mode &= ~FMODE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	case IOMODE_RW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 		if (pnfs_find_first_lseg(lo, range, IOMODE_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 			mode &= ~FMODE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	return mode == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	const struct pnfs_layout_range *range = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	struct pnfs_layout_hdr *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 		if (!pnfs_layout_can_be_returned(lo) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 		    test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		inode = lo->plh_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 		spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		if (!pnfs_should_return_unused_layout(lo, range)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 			spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 		inode = pnfs_grab_inode_layout_hdr(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 		if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 		pnfs_mark_layout_for_return(inode, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 		iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 		goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) pnfs_layout_return_unused_byclid(struct nfs_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 				 enum pnfs_iomode iomode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	struct pnfs_layout_range range = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 		.iomode = iomode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 		.offset = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 		.length = NFS4_MAX_UINT64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	nfs_client_for_each_server(clp, pnfs_layout_return_unused_byserver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 			&range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	if (pgio->pg_lseg == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	    test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	pnfs_put_lseg(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	pgio->pg_lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)  * Check for any intersection between the request and the pgio->pg_lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)  * and if none, put this pgio->pg_lseg away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		pnfs_put_lseg(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		pgio->pg_lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	u64 rd_size = req->wb_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	pnfs_generic_pg_check_layout(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	pnfs_generic_pg_check_range(pgio, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	if (pgio->pg_lseg == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		if (pgio->pg_dreq == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 						   nfs_req_openctx(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 						   req_offset(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 						   rd_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 						   IOMODE_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 						   false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 						   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		if (IS_ERR(pgio->pg_lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 			pgio->pg_lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	/* If no lseg, fall back to read through mds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	if (pgio->pg_lseg == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		nfs_pageio_reset_read_mds(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 			   struct nfs_page *req, u64 wb_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	pnfs_generic_pg_check_layout(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	pnfs_generic_pg_check_range(pgio, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	if (pgio->pg_lseg == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 						   nfs_req_openctx(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 						   req_offset(req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 						   wb_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 						   IOMODE_RW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 						   false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 						   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 		if (IS_ERR(pgio->pg_lseg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 			pgio->pg_lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	/* If no lseg, fall back to write through mds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	if (pgio->pg_lseg == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		nfs_pageio_reset_write_mds(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	if (desc->pg_lseg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		pnfs_put_lseg(desc->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		desc->pg_lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)  * of bytes (maximum @req->wb_bytes) that can be coalesced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) size_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 		     struct nfs_page *prev, struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	u64 seg_end, req_start, seg_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	size = nfs_generic_pg_test(pgio, prev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	if (!size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	 * 'size' contains the number of bytes left in the current page (up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	 * to the original size asked for in @req->wb_bytes).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	 * Calculate how many bytes are left in the layout segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	 * and if there are less bytes than 'size', return that instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	 * Please also note that 'end_offset' is actually the offset of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	 * first byte that lies outside the pnfs_layout_range. FIXME?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	if (pgio->pg_lseg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 		seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 				     pgio->pg_lseg->pls_range.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 		req_start = req_offset(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 		/* start of request is past the last byte of this segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		if (req_start >= seg_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 		/* adjust 'size' iff there are fewer bytes left in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 		 * segment than what nfs_generic_pg_test returned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 		seg_left = seg_end - req_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		if (seg_left < size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 			size = (unsigned int)seg_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	struct nfs_pageio_descriptor pgio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	/* Resend all requests through the MDS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 			      hdr->completion_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 	return nfs_pageio_resend(&pgio, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 	    PNFS_LAYOUTRET_ON_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		pnfs_return_layout(hdr->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)  * Called by non rpc-based layout drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	if (likely(!hdr->pnfs_error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 				hdr->mds_offset + hdr->res.count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	if (unlikely(hdr->pnfs_error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 		pnfs_ld_handle_write_error(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	hdr->mds_ops->rpc_release(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 		struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 		nfs_pageio_reset_write_mds(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 		mirror->pg_recoalesce = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	hdr->completion_ops->completion(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) static enum pnfs_try_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 			const struct rpc_call_ops *call_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 			struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 			int how)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	struct inode *inode = hdr->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	enum pnfs_try_status trypnfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	struct nfs_server *nfss = NFS_SERVER(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	hdr->mds_ops = call_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		inode->i_ino, hdr->args.count, hdr->args.offset, how);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	if (trypnfs != PNFS_NOT_ATTEMPTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	return trypnfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) pnfs_do_write(struct nfs_pageio_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	      struct nfs_pgio_header *hdr, int how)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	struct pnfs_layout_segment *lseg = desc->pg_lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	enum pnfs_try_status trypnfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	switch (trypnfs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	case PNFS_NOT_ATTEMPTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		pnfs_write_through_mds(desc, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	case PNFS_ATTEMPTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	case PNFS_TRY_AGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 		/* cleanup hdr and prepare to redo pnfs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 			list_splice_init(&hdr->pages, &mirror->pg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 			mirror->pg_recoalesce = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		hdr->mds_ops->rpc_release(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	pnfs_put_lseg(hdr->lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	nfs_pgio_header_free(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	struct nfs_pgio_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 	if (!hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		desc->pg_error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 		return desc->pg_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	ret = nfs_generic_pgio(desc, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		pnfs_do_write(desc, hdr, desc->pg_ioflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	struct nfs_pageio_descriptor pgio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	/* Resend all requests through the MDS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	return nfs_pageio_resend(&pgio, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	    PNFS_LAYOUTRET_ON_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 		pnfs_return_layout(hdr->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)  * Called by non rpc-based layout drivers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	if (likely(!hdr->pnfs_error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	if (unlikely(hdr->pnfs_error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 		pnfs_ld_handle_read_error(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 	hdr->mds_ops->rpc_release(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 		struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		nfs_pageio_reset_read_mds(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 		mirror->pg_recoalesce = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 	hdr->completion_ops->completion(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)  * Call the appropriate parallel I/O subsystem read function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) static enum pnfs_try_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 		       const struct rpc_call_ops *call_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		       struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	struct inode *inode = hdr->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 	struct nfs_server *nfss = NFS_SERVER(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	enum pnfs_try_status trypnfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	hdr->mds_ops = call_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	dprintk("%s: Reading ino:%lu %u@%llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	if (trypnfs != PNFS_NOT_ATTEMPTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	return trypnfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) /* Resend all requests through pnfs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 			   unsigned int mirror_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	struct nfs_pageio_descriptor pgio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 		/* Prevent deadlocks with layoutreturn! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 		pnfs_put_lseg(hdr->lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 		hdr->lseg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 		nfs_pageio_init_read(&pgio, hdr->inode, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 					hdr->completion_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 		pgio.pg_mirror_idx = mirror_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 	struct pnfs_layout_segment *lseg = desc->pg_lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 	enum pnfs_try_status trypnfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	switch (trypnfs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	case PNFS_NOT_ATTEMPTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 		pnfs_read_through_mds(desc, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	case PNFS_ATTEMPTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	case PNFS_TRY_AGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 		/* cleanup hdr and prepare to redo pnfs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 			list_splice_init(&hdr->pages, &mirror->pg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 			mirror->pg_recoalesce = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 		hdr->mds_ops->rpc_release(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	pnfs_put_lseg(hdr->lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	nfs_pgio_header_free(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	struct nfs_pgio_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 	if (!hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 		desc->pg_error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 		return desc->pg_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	ret = nfs_generic_pgio(desc, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 		pnfs_do_read(desc, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) static void pnfs_clear_layoutcommitting(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	unsigned long *bitlock = &NFS_I(inode)->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)  * There can be multiple RW segments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	struct pnfs_layout_segment *lseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 		if (lseg->pls_range.iomode == IOMODE_RW &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 			list_add(&lseg->pls_lc_list, listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	struct pnfs_layout_segment *lseg, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	/* Matched by references in pnfs_set_layoutcommit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 		list_del_init(&lseg->pls_lc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 		pnfs_put_lseg(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	pnfs_clear_layoutcommitting(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 		loff_t end_pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 	struct nfs_inode *nfsi = NFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 	bool mark_as_dirty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 		nfsi->layout->plh_lwb = end_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 		mark_as_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 		dprintk("%s: Set layoutcommit for inode %lu ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 			__func__, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 	} else if (end_pos > nfsi->layout->plh_lwb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 		nfsi->layout->plh_lwb = end_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		/* references matched in nfs4_layoutcommit_release */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 		pnfs_get_lseg(lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	dprintk("%s: lseg %p end_pos %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		__func__, lseg, nfsi->layout->plh_lwb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	if (mark_as_dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 		mark_inode_dirty_sync(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 	struct nfs_server *nfss = NFS_SERVER(data->args.inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164)  * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165)  * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)  * data to disk to allow the server to recover the data if it crashes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)  * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)  * is off, and a COMMIT is sent to a data server, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)  * if WRITEs to a data server return NFS_DATA_SYNC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) pnfs_layoutcommit_inode(struct inode *inode, bool sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	struct nfs4_layoutcommit_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	struct nfs_inode *nfsi = NFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 	loff_t end_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	if (!pnfs_layoutcommit_outstanding(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	status = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 		if (!sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 		status = wait_on_bit_lock_action(&nfsi->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 				NFS_INO_LAYOUTCOMMITTING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 				nfs_wait_bit_killable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 				TASK_KILLABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 	data = kzalloc(sizeof(*data), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 		goto clear_layoutcommitting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	INIT_LIST_HEAD(&data->lseg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 	pnfs_list_write_lseg(inode, &data->lseg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	end_pos = nfsi->layout->plh_lwb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	data->cred = get_cred(nfsi->layout->plh_lc_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 	data->args.inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	nfs_fattr_init(&data->fattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	data->res.fattr = &data->fattr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	if (end_pos != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 		data->args.lastbytewritten = end_pos - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		data->args.lastbytewritten = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	data->res.server = NFS_SERVER(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	if (ld->prepare_layoutcommit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 		status = ld->prepare_layoutcommit(&data->args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 		if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 			put_cred(data->cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 			spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 			if (end_pos > nfsi->layout->plh_lwb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 				nfsi->layout->plh_lwb = end_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 	status = nfs4_proc_layoutcommit(data, sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		mark_inode_dirty_sync(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	dprintk("<-- %s status %d\n", __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) clear_layoutcommitting:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	pnfs_clear_layoutcommitting(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) pnfs_generic_sync(struct inode *inode, bool datasync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	return pnfs_layoutcommit_inode(inode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) EXPORT_SYMBOL_GPL(pnfs_generic_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	struct nfs4_threshold *thp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	thp = kzalloc(sizeof(*thp), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 	if (!thp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 		dprintk("%s mdsthreshold allocation failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 	return thp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) #if IS_ENABLED(CONFIG_NFS_V4_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	struct nfs_server *server = NFS_SERVER(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	struct nfs_inode *nfsi = NFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	struct nfs42_layoutstat_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	struct pnfs_layout_hdr *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	if (!NFS_I(inode)->layout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 		spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		goto out_clear_layoutstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	hdr = NFS_I(inode)->layout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	pnfs_get_layout_hdr(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	data = kzalloc(sizeof(*data), gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 		status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 		goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	data->args.fh = NFS_FH(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 	data->args.inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	status = ld->prepare_layoutstats(&data->args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 	dprintk("%s returns %d\n", __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 	pnfs_put_layout_hdr(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) out_clear_layoutstats:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 	smp_mb__before_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) unsigned int layoutstats_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) module_param(layoutstats_timer, uint, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) EXPORT_SYMBOL_GPL(layoutstats_timer);