Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5/5B/5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * inode.c - NILFS inode operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Written by Ryusuke Konishi.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/buffer_head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/mpage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/writeback.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/fiemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include "nilfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include "btnode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include "segment.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include "page.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include "mdt.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include "cpfile.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "ifile.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  * struct nilfs_iget_args - arguments used during comparison between inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * @ino: inode number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  * @cno: checkpoint number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * @root: pointer on NILFS root object (mounted checkpoint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  * @for_gc: inode for GC flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) struct nilfs_iget_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 	u64 ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 	__u64 cno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 	struct nilfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 	int for_gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) static int nilfs_iget_test(struct inode *inode, void *opaque);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) void nilfs_inode_add_blocks(struct inode *inode, int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 	struct nilfs_root *root = NILFS_I(inode)->i_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 	inode_add_bytes(inode, i_blocksize(inode) * n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 	if (root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 		atomic64_add(n, &root->blocks_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) void nilfs_inode_sub_blocks(struct inode *inode, int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 	struct nilfs_root *root = NILFS_I(inode)->i_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	inode_sub_bytes(inode, i_blocksize(inode) * n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	if (root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 		atomic64_sub(n, &root->blocks_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	/* number of contiguous blocks the caller asked to map at once */
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	/*
	 * The bmap lookup is done under the DAT metadata-file semaphore so
	 * the virtual-to-disk block translation stays consistent.
	 */
	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			/* ret > 0 is the count of contiguous blocks found */
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		/* the insertion below must run inside a transaction */
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				/* lost race: not fatal, report success */
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		/* mapped with block 0: real block is assigned at log write */
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * not found is not error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 
/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 *
 * Delegates to the generic mpage code; block mapping is resolved through
 * nilfs_get_block().
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 
/*
 * nilfs_readahead - readahead callback of nilfs_aops; reads ahead via the
 * generic mpage helper, mapping blocks with nilfs_get_block().
 */
static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) static int nilfs_writepages(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 			    struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	if (sb_rdonly(inode->i_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 		nilfs_clear_dirty_pages(mapping, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	if (wbc->sync_mode == WB_SYNC_ALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 						    wbc->range_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 						    wbc->range_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	if (sb_rdonly(inode->i_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 		 * It means that filesystem was remounted in read-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 		 * mode because of error or metadata corruption. But we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 		 * have dirty pages that try to be flushed in background.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 		 * So, here we simply discard this dirty page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 		nilfs_clear_dirty_page(page, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 		unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	redirty_page_for_writepage(wbc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	if (wbc->sync_mode == WB_SYNC_ALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 		err = nilfs_construct_segment(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	} else if (wbc->for_reclaim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 		nilfs_flush_segment(inode->i_sb, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 
/*
 * nilfs_set_page_dirty - set_page_dirty callback of nilfs_aops.
 *
 * In addition to the generic dirtying done by
 * __set_page_dirty_nobuffers(), the mapped buffers of the page are marked
 * dirty and the count of newly dirtied blocks is fed to
 * nilfs_set_file_dirty() so the segment constructor can account for them.
 */
static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned int nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c in
		 * which call sites of mark_buffer_dirty are protected
		 * by page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/*
			 * Do not mark hole blocks dirty.  Note: "continue"
			 * in a do-while still evaluates the controlling
			 * expression, so bh advances to the next buffer.
			 */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		/* no buffers: account one page worth of blocks */
		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) void nilfs_write_failed(struct address_space *mapping, loff_t to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	if (to > inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 		truncate_pagecache(inode, inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 		nilfs_truncate(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) static int nilfs_write_begin(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 			     loff_t pos, unsigned len, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 			     struct page **pagep, void **fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	err = block_write_begin(mapping, pos, len, flags, pagep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 				nilfs_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 		nilfs_write_failed(mapping, pos + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 		nilfs_transaction_abort(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) static int nilfs_write_end(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 			   loff_t pos, unsigned len, unsigned copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 			   struct page *page, void *fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	unsigned int start = pos & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	unsigned int nr_dirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	nr_dirty = nilfs_page_count_clean_buffers(page, start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 						  start + copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	copied = generic_write_end(file, mapping, pos, len, copied, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 				   fsdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	nilfs_set_file_dirty(inode, nr_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	err = nilfs_transaction_commit(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	return err ? : copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	struct inode *inode = file_inode(iocb->ki_filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	if (iov_iter_rw(iter) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	/* Needs synchronization with the cleaner */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) const struct address_space_operations nilfs_aops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	.writepage		= nilfs_writepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	.readpage		= nilfs_readpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	.writepages		= nilfs_writepages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	.set_page_dirty		= nilfs_set_page_dirty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	.readahead		= nilfs_readahead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	.write_begin		= nilfs_write_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	.write_end		= nilfs_write_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	/* .releasepage		= nilfs_releasepage, */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	.invalidatepage		= block_invalidatepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	.direct_IO		= nilfs_direct_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	.is_partially_uptodate  = block_is_partially_uptodate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) static int nilfs_insert_inode_locked(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 				     struct nilfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 				     unsigned long ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	struct nilfs_iget_args args = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 		.ino = ino, .root = root, .cno = 0, .for_gc = 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 
/**
 * nilfs_new_inode - allocate a new inode under a directory.
 * @dir: parent directory inode
 * @mode: file mode of the new inode
 *
 * Allocates a VFS inode, reserves an on-disk inode in the ifile of the
 * parent's checkpoint root, initializes ownership, timestamps, inherited
 * flags and (for regular files, directories and symlinks) the bmap, then
 * hashes the inode locked.
 *
 * Return: the new locked inode, or an ERR_PTR() on failure.
 */
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	/* forbid __GFP_FS page-cache allocations to avoid fs recursion */
	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		/* raw_inode == NULL initializes an empty bmap */
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	/* inherit only the flags valid for this file type from the parent */
	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	/* i_generation counter is shared filesystem-wide */
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occur.  When supporting nilfs_init_acl(),
		 * proper cancellation of above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	/* drop the link so eviction deletes the on-disk inode */
	clear_nlink(inode);
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	/* no on-disk inode exists yet; just poison and drop the VFS inode */
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) void nilfs_set_inode_flags(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	unsigned int flags = NILFS_I(inode)->i_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	unsigned int new_fl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	if (flags & FS_SYNC_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		new_fl |= S_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	if (flags & FS_APPEND_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 		new_fl |= S_APPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	if (flags & FS_IMMUTABLE_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 		new_fl |= S_IMMUTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	if (flags & FS_NOATIME_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 		new_fl |= S_NOATIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	if (flags & FS_DIRSYNC_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 		new_fl |= S_DIRSYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 			S_NOATIME | S_DIRSYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) int nilfs_read_inode_common(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 			    struct nilfs_inode *raw_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	struct nilfs_inode_info *ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	inode->i_size = le64_to_cpu(raw_inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	if (inode->i_nlink == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		return -ESTALE; /* this inode is deleted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		0 : le32_to_cpu(raw_inode->i_dir_acl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	ii->i_dir_start_lookup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	    S_ISLNK(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		set_bit(NILFS_I_BMAP, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		/* No lock is needed; iget() ensures it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) static int __nilfs_read_inode(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 			      struct nilfs_root *root, unsigned long ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 			      struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	struct the_nilfs *nilfs = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	struct nilfs_inode *raw_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		goto bad_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	err = nilfs_read_inode_common(inode, raw_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		goto failed_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	if (S_ISREG(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		inode->i_op = &nilfs_file_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		inode->i_fop = &nilfs_file_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 		inode->i_mapping->a_ops = &nilfs_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	} else if (S_ISDIR(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		inode->i_op = &nilfs_dir_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		inode->i_fop = &nilfs_dir_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		inode->i_mapping->a_ops = &nilfs_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	} else if (S_ISLNK(inode->i_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		inode->i_op = &nilfs_symlink_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		inode_nohighmem(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		inode->i_mapping->a_ops = &nilfs_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		inode->i_op = &nilfs_special_inode_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		init_special_inode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 			inode, inode->i_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	nilfs_set_inode_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	mapping_set_gfp_mask(inode->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512)  failed_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516)  bad_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) static int nilfs_iget_test(struct inode *inode, void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	struct nilfs_iget_args *args = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	struct nilfs_inode_info *ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		return !args->for_gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	return args->for_gc && args->cno == ii->i_cno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) static int nilfs_iget_set(struct inode *inode, void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	struct nilfs_iget_args *args = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	inode->i_ino = args->ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	if (args->for_gc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		NILFS_I(inode)->i_cno = args->cno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		NILFS_I(inode)->i_root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		if (args->root && args->ino == NILFS_ROOT_INO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 			nilfs_get_root(args->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		NILFS_I(inode)->i_root = args->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 			    unsigned long ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	struct nilfs_iget_args args = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		.ino = ino, .root = root, .cno = 0, .for_gc = 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	return ilookup5(sb, ino, nilfs_iget_test, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 				unsigned long ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	struct nilfs_iget_args args = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		.ino = ino, .root = root, .cno = 0, .for_gc = 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 			 unsigned long ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	inode = nilfs_iget_locked(sb, root, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	if (unlikely(!inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	if (!(inode->i_state & I_NEW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 		return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	err = __nilfs_read_inode(sb, root, ino, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		iget_failed(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	unlock_new_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 				__u64 cno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	struct nilfs_iget_args args = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	if (unlikely(!inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	if (!(inode->i_state & I_NEW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	err = nilfs_init_gcinode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		iget_failed(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	unlock_new_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) void nilfs_write_inode_common(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 			      struct nilfs_inode *raw_inode, int has_bmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	struct nilfs_inode_info *ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	raw_inode->i_size = cpu_to_le64(inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		/* zero-fill unused portion in the case of super root block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		raw_inode->i_xattr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		raw_inode->i_pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		memset((void *)raw_inode + sizeof(*raw_inode), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		       nilfs->ns_inode_size - sizeof(*raw_inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	if (has_bmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		nilfs_bmap_write(ii->i_bmap, raw_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		raw_inode->i_device_code =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 			cpu_to_le64(huge_encode_dev(inode->i_rdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	 * When extending inode, nilfs->ns_inode_size should be checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	 * for substitutions of appended fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	ino_t ino = inode->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	struct nilfs_inode_info *ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	struct inode *ifile = ii->i_root->ifile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	struct nilfs_inode *raw_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	if (flags & I_DIRTY_DATASYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	nilfs_write_inode_common(inode, raw_inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		 * XXX: call with has_bmap = 0 is a workaround to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		 * deadlock of bmap.  This delays update of i_bmap to just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		 * before writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	nilfs_ifile_unmap_inode(ifile, ino, ibh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) #define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 				unsigned long from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	__u64 b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	if (ret == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (b < from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	ret = nilfs_bmap_truncate(ii->i_bmap, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	if (!ret || (ret == -ENOMEM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		   ret, ii->vfs_inode.i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) void nilfs_truncate(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	unsigned long blkoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	unsigned int blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	struct nilfs_transaction_info ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	struct nilfs_inode_info *ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	blocksize = sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	nilfs_truncate_bmap(ii, blkoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (IS_SYNC(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	nilfs_mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	nilfs_set_file_dirty(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	nilfs_transaction_commit(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	 * May construct a logical segment and may fail in sync mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	 * But truncate has no return value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static void nilfs_clear_inode(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	struct nilfs_inode_info *ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	 * Free resources allocated in nilfs_read_inode(), here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	BUG_ON(!list_empty(&ii->i_dirty));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	brelse(ii->i_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	ii->i_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	if (nilfs_is_metadata_file_inode(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		nilfs_mdt_clear(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	if (test_bit(NILFS_I_BMAP, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		nilfs_bmap_clear(ii->i_bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		nilfs_put_root(ii->i_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) void nilfs_evict_inode(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	struct nilfs_transaction_info ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	struct nilfs_inode_info *ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		truncate_inode_pages_final(&inode->i_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		clear_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		nilfs_clear_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	truncate_inode_pages_final(&inode->i_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	/* TODO: some of the following operations may fail.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	nilfs_truncate_bmap(ii, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	nilfs_mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	clear_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		atomic64_dec(&ii->i_root->inodes_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	nilfs_clear_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (IS_SYNC(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	nilfs_transaction_commit(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 * May construct a logical segment and may fail in sync mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	 * But delete_inode has no return value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	struct nilfs_transaction_info ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	struct inode *inode = d_inode(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	err = setattr_prepare(dentry, iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	err = nilfs_transaction_begin(sb, &ti, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if ((iattr->ia_valid & ATTR_SIZE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	    iattr->ia_size != i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		inode_dio_wait(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		truncate_setsize(inode, iattr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		nilfs_truncate(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	setattr_copy(inode, iattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	if (iattr->ia_valid & ATTR_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		err = nilfs_acl_chmod(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	return nilfs_transaction_commit(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	nilfs_transaction_abort(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) int nilfs_permission(struct inode *inode, int mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	struct nilfs_root *root = NILFS_I(inode)->i_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	if ((mask & MAY_WRITE) && root &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	    root->cno != NILFS_CPTREE_CURRENT_CNO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		return -EROFS; /* snapshot is not writable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	return generic_permission(inode, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	struct nilfs_inode_info *ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if (ii->i_bh == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 						  inode->i_ino, pbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		if (ii->i_bh == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			ii->i_bh = *pbh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			brelse(*pbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			*pbh = ii->i_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		*pbh = ii->i_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	get_bh(*pbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) int nilfs_inode_dirty(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct nilfs_inode_info *ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	if (!list_empty(&ii->i_dirty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			test_bit(NILFS_I_BUSY, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	struct nilfs_inode_info *ii = NILFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	spin_lock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		 * Because this routine may race with nilfs_dispose_list(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		 * we have to check NILFS_I_QUEUED here, too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			 * This will happen when somebody is freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			 * this inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			nilfs_warn(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 				   "cannot set file dirty (ino=%lu): the file is being freed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 				   inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			return -EINVAL; /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 					 * NILFS_I_DIRTY may remain for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 					 * freeing inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 					 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		set_bit(NILFS_I_QUEUED, &ii->i_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	spin_unlock(&nilfs->ns_inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	struct buffer_head *ibh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	err = nilfs_load_inode_block(inode, &ibh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		nilfs_warn(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			   inode->i_ino, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	nilfs_update_inode(inode, ibh, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	mark_buffer_dirty(ibh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	brelse(ibh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  * nilfs_dirty_inode - reflect changes on given inode to an inode block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959)  * @inode: inode of the file to be registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  * nilfs_dirty_inode() loads a inode block containing the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  * @inode and copies data from a nilfs_inode to a corresponding inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  * entry in the inode block. This operation is excluded from the segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  * construction. This function can be called both as a single operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  * and as a part of indivisible file operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) void nilfs_dirty_inode(struct inode *inode, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	struct nilfs_transaction_info ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	if (is_bad_inode(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		nilfs_warn(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			   "tried to mark bad_inode dirty. ignored.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (mdi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		nilfs_mdt_mark_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	nilfs_transaction_begin(inode->i_sb, &ti, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	__nilfs_mark_inode_dirty(inode, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	nilfs_transaction_commit(inode->i_sb); /* never fails */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		 __u64 start, __u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	__u64 logical = 0, phys = 0, size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	__u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	loff_t isize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	sector_t blkoff, end_blkoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	sector_t delalloc_blkoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	unsigned long delalloc_blklen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	unsigned int blkbits = inode->i_blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	int ret, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	isize = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	blkoff = start >> blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	end_blkoff = (start + len - 1) >> blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 							&delalloc_blkoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		__u64 blkphy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		unsigned int maxblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		if (delalloc_blklen && blkoff == delalloc_blkoff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 				/* End of the current extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 				ret = fiemap_fill_next_extent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 					fieinfo, logical, phys, size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			if (blkoff > end_blkoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			logical = blkoff << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			size = delalloc_blklen << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			blkoff = delalloc_blkoff + delalloc_blklen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			delalloc_blklen = nilfs_find_uncommitted_extent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 				inode, blkoff, &delalloc_blkoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		 * Limit the number of blocks that we look up so as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		 * not to get into the next delayed allocation extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		maxblocks = INT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		if (delalloc_blklen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 					  maxblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		blkphy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		n = nilfs_bmap_lookup_contig(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		if (n < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			int past_eof;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			if (unlikely(n != -ENOENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 				break; /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			/* HOLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			blkoff++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			past_eof = ((blkoff << blkbits) >= isize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				/* End of the current extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				if (past_eof)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 					flags |= FIEMAP_EXTENT_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 				ret = fiemap_fill_next_extent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 					fieinfo, logical, phys, size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 				size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			if (blkoff > end_blkoff || past_eof)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 				if (phys && blkphy << blkbits == phys + size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 					/* The current extent goes on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 					size += n << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 					/* Terminate the current extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 					ret = fiemap_fill_next_extent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 						fieinfo, logical, phys, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 						flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 					if (ret || blkoff > end_blkoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 					/* Start another extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 					flags = FIEMAP_EXTENT_MERGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 					logical = blkoff << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 					phys = blkphy << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 					size = n << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				/* Start a new extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 				flags = FIEMAP_EXTENT_MERGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 				logical = blkoff << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 				phys = blkphy << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 				size = n << blkbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			blkoff += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	} while (true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	/* If ret is 1 then we just hit the end of the extent array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }