Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

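/*
 * An extent reads back as a hole when the server holds no data for it:
 * NONE_DATA extents always do, and INVALID_DATA extents do until the
 * client has written them (at which point be_tag is set).
 */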
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

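/*
 * Submit the bio, if any, taking a reference on the parallel_io tracker
 * for the now in-flight I/O.  Always returns NULL so that callers can
 * drop their pointer in one step: bio = bl_submit_bio(bio);
 */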
static struct bio *
bl_submit_bio(struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			bio_op(bio) == READ ? "read" : "write",
			bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(bio);
	}
	return NULL;
}

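/*
 * Allocate a bio for up to npg pages.  When allocation fails during
 * memory reclaim (PF_MEMALLOC), retry with progressively smaller
 * requests instead of giving up immediately.
 */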
static struct bio *
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
		bio_end_io_t end_io, struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio_set_dev(bio, bdev);
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map)
{
	return offset >= map->start && offset < map->start + map->len;
}

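/*
 * Add a page to the current bio.  The file-relative sector isect is
 * translated first into an offset on the extent's volume and then, via
 * dev->map(), into an offset on the physical device; *len is trimmed to
 * what the current device mapping allows.  A bio that fills up is
 * submitted and a fresh one allocated transparently.
 */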
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be, bio_end_io_t end_io,
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (!offset_in_map(disk_addr, map)) {
		if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
		bio_set_op_attrs(bio, rw, 0);
	}
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(bio);
		goto retry;
	}
	return bio;
}

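/*
 * On I/O error, walk the extents covered by the failed request and mark
 * each one's device unavailable, so later layout processing steers
 * clear of it until the retry timeout has passed.
 */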
static void bl_mark_devices_unavailable(struct nfs_pgio_header *header, bool rw)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	size_t bytes_left = header->args.count;
	sector_t isect, extent_length = 0;
	struct pnfs_block_extent be;

	isect = header->args.offset >> SECTOR_SHIFT;
	bytes_left += header->args.offset - (isect << SECTOR_SHIFT);

	while (bytes_left > 0) {
		if (!ext_tree_lookup(bl, isect, &be, rw))
			return;
		extent_length = be.be_length - (isect - be.be_f_offset);
		nfs4_mark_deviceid_unavailable(be.be_device);
		isect += extent_length;
		if (bytes_left > extent_length << SECTOR_SHIFT)
			bytes_left -= extent_length << SECTOR_SHIFT;
		else
			bytes_left = 0;
	}
}

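/*
 * bio completion for reads: record the first error in the pNFS header,
 * fail the layout segment, then drop the bio and parallel_io references.
 */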
static void bl_end_io_read(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;

	if (bio->bi_status) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
		bl_mark_devices_unavailable(header, false);
	}

	bio_put(bio);
	put_parallel(par);
}

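/*
 * Final completion is bounced to a workqueue (reusing the header's
 * embedded rpc_task work item) so that pnfs_ld_read_done() runs in
 * process context rather than from bio end_io.
 */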
static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

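/*
 * Read the requested pages straight from the block devices described by
 * the layout.  Holes are zero-filled without touching a device; all
 * other extents are batched into bios under a block plug.
 */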
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset = header->args.pgbase, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_SIZE)
				pg_len = PAGE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_SIZE;
		}

		if (is_hole(&be)) {
			bio = bl_submit_bio(bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
		pg_offset = 0;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

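/*
 * bio completion for writes: as for reads, record the first error, fail
 * the layout segment and mark the devices involved unavailable.
 */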
static void bl_end_io_write(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;
	struct nfs_pgio_header *header = par->data;

	if (bio->bi_status) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
		bl_mark_devices_unavailable(header, true);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
			container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
		u64 lwb = hdr->args.offset + hdr->args.count;

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT, lwb);
	}

	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

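/*
 * Write the pages straight to the block devices.  Whole pages are
 * always written, so the request is first rounded down to a page
 * boundary; bl_write_cleanup() later marks the sectors written once
 * the last bio has completed.
 */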
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}

			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree_rcu(bl, bl_layout.plh_rcu);
}

static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode,
		gfp_t gfp_flags, bool is_scsi_layout)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	bl->bl_scsi_layout = is_scsi_layout;
	return &bl->bl_layout;
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, false);
}

static struct pnfs_layout_hdr *sl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, true);
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}

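/*
 * The XDR stream carries these values in bytes while the extent tree
 * stores them in 512-byte sectors, so anything that is not sector
 * aligned is rejected here.
 */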
static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}

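/*
 * Look up a deviceid node, transparently deleting and re-fetching a
 * cached entry whose unavailable mark is older than
 * PNFS_DEVICE_RETRY_TIMEOUT.
 */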
static struct nfs4_deviceid_node *
bl_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	struct nfs4_deviceid_node *node;
	unsigned long start, end;

retry:
	node = nfs4_find_get_deviceid(server, id, cred, gfp_mask);
	if (!node)
		return ERR_PTR(-ENODEV);

	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags) == 0)
		return node;

	end = jiffies;
	start = end - PNFS_DEVICE_RETRY_TIMEOUT;
	if (!time_in_range(node->timestamp_unavailable, start, end)) {
		nfs4_delete_deviceid(node->ld, node->nfs_client, id);
		goto retry;
	}
	return ERR_PTR(-ENODEV);
}

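/*
 * Decode one on-the-wire extent: deviceid, file offset, length, volume
 * offset and state.  The extent is checked against the layout rules in
 * verify_extent() before being queued on the caller's staging list.
 */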
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	be->be_device = bl_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (IS_ERR(be->be_device)) {
		error = PTR_ERR(be->be_device);
		goto out_free_be;
	}

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	error = -EIO;
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}

static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
		gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in temporary staging area
	 * until whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	switch (status) {
	case -ENODEV:
		/* Our extent block devices are unavailable */
		set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags);
		fallthrough;
	case 0:
		return lseg;
	default:
		kfree(lseg);
		return ERR_PTR(status);
	}
}

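/*
 * Handle a layoutreturn by dropping the covered extents from the extent
 * tree, after sanity-checking the range for alignment.
 */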
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) bl_return_range(struct pnfs_layout_hdr *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		struct pnfs_layout_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	sector_t offset = range->offset >> SECTOR_SHIFT, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (range->offset % 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		dprintk("%s: offset %lld not block size aligned\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			__func__, range->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	if (range->length != NFS4_MAX_UINT64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		if (range->length % 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			dprintk("%s: length %lld not block size aligned\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 				__func__, range->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		end = offset + (range->length >> SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) }
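/*
 * All extent-tree bookkeeping is done in 512-byte sectors (SECTOR_SHIFT
 * is 9), so byte offsets and lengths are shifted down before the removal.
 * Illustrative numbers, assuming a well-aligned LAYOUTRETURN:
 *
 *	range->offset = 65536;			     byte offset
 *	range->length = 131072;
 *	offset = 65536 >> SECTOR_SHIFT;		     sector 128
 *	end    = 128 + (131072 >> SECTOR_SHIFT);     sector 384
 *
 * A length of NFS4_MAX_UINT64 means "to the end of the layout", and
 * range->iomode selects whether the read or read/write extents are hit.
 */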
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	return ext_tree_prepare_commit(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) }
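/*
 * These two hooks bracket a LAYOUTCOMMIT round trip: the prepare step
 * encodes the written-but-uncommitted extents into the layoutcommit
 * arguments, and the cleanup step either marks them committed or, when
 * lcdata->res.status reports a failure, leaves them queued for a retry
 * (both sides live in extent_tree.c).
 */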
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	dprintk("%s enter\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	if (server->pnfs_blksize == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		dprintk("%s Server did not return blksize\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	if (server->pnfs_blksize > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			__func__, server->pnfs_blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) }
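/*
 * The extent code tracks dirty state at page granularity, so a server
 * block size above PAGE_SIZE cannot be supported.  For example
 * (illustrative): a server advertising an 8 KiB pnfs_blksize on a kernel
 * built with 4 KiB pages is refused here, and the client carries on
 * without pNFS, doing plain I/O through the MDS.
 */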
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) is_aligned_req(struct nfs_pageio_descriptor *pgio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		struct nfs_page *req, unsigned int alignment, bool is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	 * Always accept buffered writes; higher layers take care of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	 * right alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	if (pgio->pg_dreq == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	if (!IS_ALIGNED(req->wb_offset, alignment))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	if (IS_ALIGNED(req->wb_bytes, alignment))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (is_write &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	    (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		 * If the write goes up to the inode size, just write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		 * the full page.  Data past the inode size is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		 * guaranteed to be zeroed by the higher level client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		 * code, and this behaviour is mandated by RFC 5663
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		 * section 2.3.2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
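/*
 * Illustrative outcomes for direct I/O requests (pg_dreq != NULL):
 *
 *	alignment 512,  wb_offset 512, wb_bytes 1024  -> accepted
 *	alignment 512,  wb_offset 100, wb_bytes 512   -> rejected (offset)
 *	alignment 4096, write whose end equals i_size -> accepted even with
 *			a short wb_bytes, per RFC 5663 section 2.3.2
 */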
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		nfs_pageio_reset_read_mds(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	pnfs_generic_pg_init_read(pgio, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	if (pgio->pg_lseg &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		pnfs_set_lo_fail(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		nfs_pageio_reset_read_mds(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874)  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875)  * of bytes (maximum @req->wb_bytes) that can be coalesced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) static size_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	return pnfs_generic_pg_test(pgio, prev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887)  * Return the number of contiguous bytes for a given inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888)  * starting at page frame idx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	pgoff_t end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	/* Optimize for the common case of a write from 0 to the end of the file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	if (end != mapping->nrpages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		end = page_cache_next_miss(mapping, idx + 1, ULONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	if (!end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		return i_size_read(inode) - (idx << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		return (end - idx) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) }
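/*
 * Worked example: i_size = 10000 with 4 KiB pages gives
 * end = DIV_ROUND_UP(10000, 4096) = 3.  If the mapping caches exactly
 * those 3 pages, the common whole-file-write case is hit and a write
 * starting at idx 0 can cover (3 - 0) << PAGE_SHIFT = 12288 bytes;
 * otherwise page_cache_next_miss() caps the span at the first hole in
 * the page cache after idx.
 */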
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	u64 wb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		nfs_pageio_reset_write_mds(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (pgio->pg_dreq == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 					      req->wb_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	pnfs_generic_pg_init_write(pgio, req, wb_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	if (pgio->pg_lseg &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		pnfs_set_lo_fail(pgio->pg_lseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		nfs_pageio_reset_write_mds(pgio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) }
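/*
 * wb_size seeds the length of the LAYOUTGET issued by
 * pnfs_generic_pg_init_write(): buffered writeback asks for the
 * contiguous cached span computed above, direct I/O for whatever is
 * still pending in the nfs_direct_req, so one layout can cover the
 * whole write instead of costing one round trip per page.
 */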
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * of bytes (maximum @req->wb_bytes) that can be coalesced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) static size_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		 struct nfs_page *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (!is_aligned_req(pgio, req, PAGE_SIZE, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	return pnfs_generic_pg_test(pgio, prev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) static const struct nfs_pageio_ops bl_pg_read_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	.pg_init = bl_pg_init_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	.pg_test = bl_pg_test_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	.pg_doio = pnfs_generic_pg_readpages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	.pg_cleanup = pnfs_generic_pg_cleanup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) static const struct nfs_pageio_ops bl_pg_write_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	.pg_init = bl_pg_init_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	.pg_test = bl_pg_test_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	.pg_doio = pnfs_generic_pg_writepages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	.pg_cleanup = pnfs_generic_pg_cleanup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) };
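/*
 * Reads only require 512-byte sector alignment, writes full pages: a
 * sub-page write would force a read-modify-write cycle against the block
 * device, which the driver sidesteps by bouncing such requests back to
 * the MDS (nfs_pageio_reset_write_mds() above).
 */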
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) static struct pnfs_layoutdriver_type blocklayout_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	.id				= LAYOUT_BLOCK_VOLUME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	.name				= "LAYOUT_BLOCK_VOLUME",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	.owner				= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 					  PNFS_LAYOUTRET_ON_ERROR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 					  PNFS_READ_WHOLE_PAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	.read_pagelist			= bl_read_pagelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	.write_pagelist			= bl_write_pagelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	.alloc_layout_hdr		= bl_alloc_layout_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	.free_layout_hdr		= bl_free_layout_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	.alloc_lseg			= bl_alloc_lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	.free_lseg			= bl_free_lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	.return_range			= bl_return_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	.prepare_layoutcommit		= bl_prepare_layoutcommit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	.set_layoutdriver		= bl_set_layoutdriver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	.alloc_deviceid_node		= bl_alloc_deviceid_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	.free_deviceid_node		= bl_free_deviceid_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	.pg_read_ops			= &bl_pg_read_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	.pg_write_ops			= &bl_pg_write_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	.sync				= pnfs_generic_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) static struct pnfs_layoutdriver_type scsilayout_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	.id				= LAYOUT_SCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	.name				= "LAYOUT_SCSI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	.owner				= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 					  PNFS_LAYOUTRET_ON_ERROR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 					  PNFS_READ_WHOLE_PAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	.read_pagelist			= bl_read_pagelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	.write_pagelist			= bl_write_pagelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	.alloc_layout_hdr		= sl_alloc_layout_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	.free_layout_hdr		= bl_free_layout_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	.alloc_lseg			= bl_alloc_lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	.free_lseg			= bl_free_lseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	.return_range			= bl_return_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	.prepare_layoutcommit		= bl_prepare_layoutcommit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	.set_layoutdriver		= bl_set_layoutdriver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	.alloc_deviceid_node		= bl_alloc_deviceid_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	.free_deviceid_node		= bl_free_deviceid_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	.pg_read_ops			= &bl_pg_read_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	.pg_write_ops			= &bl_pg_write_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	.sync				= pnfs_generic_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) };
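/*
 * The SCSI layout (RFC 8154) reuses the block layout machinery wholesale:
 * the two tables differ only in .id, .name and .alloc_layout_hdr, with
 * sl_alloc_layout_hdr (defined earlier in this file) tagging the header as
 * a SCSI layout so that persistent-reservation fencing can be layered on
 * top of the shared extent handling.
 */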
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static int __init nfs4blocklayout_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	ret = bl_init_pipefs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	ret = pnfs_register_layoutdriver(&blocklayout_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		goto out_cleanup_pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	ret = pnfs_register_layoutdriver(&scsilayout_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		goto out_unregister_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) out_unregister_block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	pnfs_unregister_layoutdriver(&blocklayout_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) out_cleanup_pipe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	bl_cleanup_pipefs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
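/*
 * Each failure label unwinds exactly the steps that succeeded before it;
 * nfs4blocklayout_exit() below performs the same teardown unconditionally,
 * in reverse registration order.
 */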
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static void __exit nfs4blocklayout_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	       __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	pnfs_unregister_layoutdriver(&scsilayout_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	pnfs_unregister_layoutdriver(&blocklayout_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	bl_cleanup_pipefs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
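/*
 * The "nfs-layouttype4-<n>" aliases let the generic NFS client autoload
 * this module on demand: 3 is LAYOUT_BLOCK_VOLUME and 5 is LAYOUT_SCSI in
 * enum pnfs_layouttype (include/linux/nfs4.h).
 */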
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) MODULE_ALIAS("nfs-layouttype4-3");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) MODULE_ALIAS("nfs-layouttype4-5");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) module_init(nfs4blocklayout_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) module_exit(nfs4blocklayout_exit);