Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5 / 5B / 5 Plus boards

File: fs/nfs/read.c (git blame: every line last modified in commit 8f3ce5b39 by kx, 2023-10-28 12:00:06 +0300)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

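/*
 * Allocate a zeroed nfs_pgio_header from the read-specific slab cache
 * and mark it as a read header via rw_mode.
 */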
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

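/*
 * Short-circuit a read when nfs_page_length() reports no data in the
 * page: zero-fill it, mark it up to date and unlock it.
 */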
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

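/*
 * Initialise a pageio descriptor for reads: use the pNFS layout
 * driver's read ops when one is attached (and force_mds is not set),
 * otherwise fall back to plain RPC I/O against the MDS, with the
 * server's rsize as the request size.
 */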
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			      struct inode *inode, bool force_mds,
			      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

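/*
 * Complete one read request: flag the page on fatal server errors,
 * push fully up-to-date pages into fscache, unlock the page once the
 * whole page group has finished, and drop the request reference.
 */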
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
	struct page *page = req->wb_page;

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
		SetPageError(page);
	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		struct address_space *mapping = page_file_mapping(page);

		if (PageUptodate(page))
			nfs_readpage_to_fscache(inode, page, 0);
		else if (!PageError(page) && !PagePrivate(page))
			generic_error_remove_page(mapping, page);
		unlock_page(page);
	}
	nfs_release_request(req);
}

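/*
 * Read a single page asynchronously: build an nfs_page request for it,
 * zero the tail beyond the known file length, and push it through a
 * freshly initialised read pageio descriptor. Returns 0 or the
 * descriptor's error.
 */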
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	if (!nfs_pageio_add_request(&pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new, pgio.pg_error);
	}
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return pgio.pg_error < 0 ? pgio.pg_error : 0;
}

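/*
 * SetPageUptodate() only once every request in the page group has
 * completed successfully.
 */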
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

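/*
 * Per-header completion: walk the finished requests, zero any part of
 * a page that lies beyond hdr->good_bytes when EOF was hit, mark
 * fully-served requests up to date, record the header error in the
 * open context otherwise, and release every request.
 */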
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
out:
	hdr->release(hdr);
}

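/*
 * Prepare the READ RPC: apply swap-specific RPC flags when the inode
 * is a swapfile, then let the protocol-specific read_setup() fill in
 * the message.
 */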
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(hdr);
}

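/* Error cleanup: release every queued request with the given error. */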
static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

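/*
 * Handle a short read: fail with -EIO when the server made no
 * progress, ask non-RPC-based layout drivers to retry through the
 * MDS, otherwise advance the offsets past the bytes already received
 * and restart the RPC for the remainder.
 */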
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

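/*
 * Post-RPC result handling: on EOF, clamp good_bytes to what the
 * server actually returned and clear any error; a short read without
 * EOF is retried via nfs_readpage_retry().
 */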
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_SIZE, page_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	xchg(&ctx->error, 0);
	error = nfs_readpage_async(ctx, inode, page);
	if (!error) {
		error = wait_on_page_locked_killable(page);
		if (!PageUptodate(page) && !error)
			error = xchg(&ctx->error, 0);
	}
out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

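/*
 * Per-call context for nfs_readpages(): the shared pageio descriptor
 * plus the open context, handed to readpage_async_filler() through
 * read_cache_pages().
 */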
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		nfs_list_remove_request(new);
		error = desc->pgio->pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	unlock_page(page);
out:
	return error;
}

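/*
 * ->readpages() entry point for readahead: try to satisfy the batch
 * from fscache first, then feed whatever is left through a single
 * pageio descriptor via read_cache_pages().
 */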
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
			inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >>
		 PAGE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

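/*
 * Create and destroy the slab cache backing nfs_readhdr_alloc(),
 * typically called at NFS module init and exit.
 */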
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};