Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

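/*
 * mmap read fault path: take i_mmap_sem (shared) around filemap_fault()
 * so the fault does not race with truncate or punch-hole, which are
 * expected to hold the same rwsem exclusively before removing blocks.
 */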
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}

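/*
 * mmap write fault (->page_mkwrite): reserve/allocate the block backing
 * the faulting page (skipped when the page already sits in a compressed
 * cluster), zero the part of the page beyond i_size when it straddles
 * EOF, and mark the page dirty so later writeback persists it.  Errors
 * are mapped to VM_FAULT_* codes by block_page_mkwrite_return().
 */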
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

	err = f2fs_convert_inline_inode(inode);
	if (err)
		goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			need_alloc = false;
		}
	}
#endif
	/* this should be done outside of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	.allow_speculation = filemap_allow_speculation,
#endif
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias.  The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

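/*
 * Decide whether this fsync can rely on roll-forward recovery (node log
 * only) or must trigger a full checkpoint.  A non-zero return value is
 * the reason a checkpoint is required.
 */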
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* but we need to check whether the inode still has pending updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	f2fs_down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	f2fs_up_write(&fi->i_sem);
}

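/*
 * Account pages that are fsynced without triggering a checkpoint.  If
 * the running total would exceed DEF_RAM_THRESHOLD percent of low
 * memory, return false so the caller falls back to a full checkpoint;
 * otherwise add @npages to the counter (presumably reset elsewhere in
 * this tree when a checkpoint is taken).
 */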
static bool f2fs_update_fsync_count(struct f2fs_sb_info *sbi,
					unsigned int npages)
{
	struct sysinfo val;
	unsigned long avail_ram;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;
	avail_ram = (avail_ram * DEF_RAM_THRESHOLD) / 100;

	if ((atomic_read(&sbi->no_cp_fsync_pages) + npages) > avail_ram)
		return false;

	atomic_add(npages, &sbi->no_cp_fsync_pages);
	return true;
}

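/*
 * Core fsync/fdatasync path: write back dirty data in [start, end], then
 * either issue a full checkpoint (when need_do_checkpoint() reports a
 * reason or the no-checkpoint budget above is exhausted) or only sync
 * this inode's node pages for roll-forward recovery, and finally issue a
 * device cache flush unless this is an atomic-write commit or
 * fsync_mode=nobarrier.
 */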
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	unsigned int npages = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	npages = get_dirty_pages(inode);
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a sudden
	 * power-off.
	 */
	f2fs_down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	f2fs_up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason || !f2fs_update_fsync_count(sbi, npages)) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic write, keeping the write ordering is enough. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we simply see a broken chain, which stops
	 * roll-forward recovery. It means we'll recover either all or none
	 * of the node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, we don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

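/*
 * SEEK_DATA/SEEK_HOLE classification for one block: a block counts as
 * data if it has a valid on-disk address, or if it is preallocated
 * (NEW_ADDR) and the corresponding page is dirty in the page cache; a
 * NULL_ADDR block is a hole.
 */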
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

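/*
 * lseek(SEEK_DATA/SEEK_HOLE) helper: starting from the page that
 * contains @offset, walk the direct node blocks and scan their block
 * addresses until __found_offset() matches, with inline-data inodes and
 * offsets at or beyond i_size handled as special cases.
 */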
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

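/*
 * Drop up to @count block addresses starting at dn->ofs_in_node in the
 * current node page: clear each slot, invalidate the block, maintain the
 * per-cluster counters for compressed files, and finally drop the range
 * from the extent cache and decrease the inode's valid block count.
 */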
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

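/*
 * Zero the tail of the page that contains the new EOF.  In cache_only
 * mode only an already cached, uptodate page is touched; otherwise the
 * page is read in, zeroed from the in-page offset of @from to its end
 * and marked dirty.
 */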
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

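/*
 * Free all blocks from the first block boundary at or above @from to the
 * end of the file: handle inline data in place, truncate the remaining
 * slots of the dnode that covers that offset, let
 * f2fs_truncate_inode_blocks() drop every later node, and lastly zero
 * the partial page at the new EOF.
 */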
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= max_file_blocks(inode))
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

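/*
 * Wrapper around f2fs_do_truncate_blocks(): for compressed inodes, round
 * @from up to a cluster boundary first and then punch the remaining
 * partial cluster with f2fs_truncate_partial_cluster().
 */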
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return 0;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) int f2fs_truncate(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 				S_ISLNK(inode->i_mode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	trace_f2fs_truncate(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	err = dquot_initialize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	/* if the inode can no longer keep inline data, convert it first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (!f2fs_may_inline_data(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		err = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	f2fs_mark_inode_dirty_sync(inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) int f2fs_getattr(const struct path *path, struct kstat *stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		 u32 request_mask, unsigned int query_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	struct inode *inode = d_inode(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	struct f2fs_inode *ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	unsigned int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (f2fs_has_extra_attr(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		stat->result_mask |= STATX_BTIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		stat->btime.tv_sec = fi->i_crtime.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	flags = fi->i_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (flags & F2FS_COMPR_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		stat->attributes |= STATX_ATTR_COMPRESSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if (flags & F2FS_APPEND_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		stat->attributes |= STATX_ATTR_APPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	if (IS_ENCRYPTED(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		stat->attributes |= STATX_ATTR_ENCRYPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (flags & F2FS_IMMUTABLE_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		stat->attributes |= STATX_ATTR_IMMUTABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	if (flags & F2FS_NODUMP_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		stat->attributes |= STATX_ATTR_NODUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (IS_VERITY(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		stat->attributes |= STATX_ATTR_VERITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 				  STATX_ATTR_APPEND |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 				  STATX_ATTR_ENCRYPTED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 				  STATX_ATTR_IMMUTABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 				  STATX_ATTR_NODUMP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 				  STATX_ATTR_VERITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	generic_fillattr(inode, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	/* we need to show initial sectors used for inline_data/dentries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 					f2fs_has_inline_dentry(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		stat->blocks += (stat->size + 511) >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) #ifdef CONFIG_F2FS_FS_POSIX_ACL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) static void __setattr_copy(struct inode *inode, const struct iattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	unsigned int ia_valid = attr->ia_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	if (ia_valid & ATTR_UID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		inode->i_uid = attr->ia_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (ia_valid & ATTR_GID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		inode->i_gid = attr->ia_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (ia_valid & ATTR_ATIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		inode->i_atime = attr->ia_atime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	if (ia_valid & ATTR_MTIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		inode->i_mtime = attr->ia_mtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (ia_valid & ATTR_CTIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		inode->i_ctime = attr->ia_ctime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	if (ia_valid & ATTR_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		umode_t mode = attr->ia_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		if (!in_group_p(inode->i_gid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			mode &= ~S_ISGID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		set_acl_inode(inode, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) #define __setattr_copy setattr_copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	struct inode *inode = d_inode(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (unlikely(IS_IMMUTABLE(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (unlikely(IS_APPEND(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 				  ATTR_GID | ATTR_TIMES_SET))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	if ((attr->ia_valid & ATTR_SIZE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		!f2fs_is_compress_backend_ready(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	err = setattr_prepare(dentry, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	err = fscrypt_prepare_setattr(dentry, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	err = fsverity_prepare_setattr(dentry, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	if (is_quota_modification(inode, attr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		err = dquot_initialize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	if ((attr->ia_valid & ATTR_UID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		!uid_eq(attr->ia_uid, inode->i_uid)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		(attr->ia_valid & ATTR_GID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		!gid_eq(attr->ia_gid, inode->i_gid))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		f2fs_lock_op(F2FS_I_SB(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		err = dquot_transfer(inode, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			set_sbi_flag(F2FS_I_SB(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 					SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			f2fs_unlock_op(F2FS_I_SB(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		 * update uid/gid under lock_op(), so that dquot and inode can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		 * be updated atomically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		if (attr->ia_valid & ATTR_UID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			inode->i_uid = attr->ia_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		if (attr->ia_valid & ATTR_GID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			inode->i_gid = attr->ia_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		f2fs_mark_inode_dirty_sync(inode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		f2fs_unlock_op(F2FS_I_SB(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (attr->ia_valid & ATTR_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		loff_t old_size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			 * convert the inline inode before i_size_write() so an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			 * inode with the inline flag never exceeds the inline_data size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			err = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		truncate_setsize(inode, attr->ia_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		if (attr->ia_size <= old_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			err = f2fs_truncate(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		 * do not trim all blocks after i_size if target size is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		 * larger than i_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		spin_lock(&F2FS_I(inode)->i_size_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		F2FS_I(inode)->last_disk_size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		spin_unlock(&F2FS_I(inode)->i_size_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	__setattr_copy(inode, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	if (attr->ia_valid & ATTR_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 				inode->i_mode = F2FS_I(inode)->i_acl_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			clear_inode_flag(inode, FI_ACL_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	/* file size may have changed here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	f2fs_mark_inode_dirty_sync(inode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	/* inode change will produce dirty node pages flushed by checkpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	f2fs_balance_fs(F2FS_I_SB(inode), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) const struct inode_operations f2fs_file_inode_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	.getattr	= f2fs_getattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	.setattr	= f2fs_setattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	.get_acl	= f2fs_get_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	.set_acl	= f2fs_set_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	.listxattr	= f2fs_listxattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	.fiemap		= f2fs_fiemap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static int fill_zero(struct inode *inode, pgoff_t index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 					loff_t start, loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	f2fs_balance_fs(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	page = f2fs_get_new_data_page(inode, NULL, index, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	if (IS_ERR(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		return PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	f2fs_wait_on_page_writeback(page, DATA, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	zero_user(page, start, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	while (pg_start < pg_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		pgoff_t end_offset, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			if (err == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 				pg_start = f2fs_get_next_page_offset(&dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 								pg_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		f2fs_truncate_data_blocks_range(&dn, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		pg_start += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
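/*
 * Punch a hole over [offset, offset + len): zero out the partial pages at
 * each end of the range and drop the fully covered blocks in between.
 */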
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	pgoff_t pg_start, pg_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	loff_t off_start, off_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	ret = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	off_start = offset & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	off_end = (offset + len) & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	if (pg_start == pg_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		ret = fill_zero(inode, pg_start, off_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 						off_end - off_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		if (off_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			ret = fill_zero(inode, pg_start++, off_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 						PAGE_SIZE - off_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		if (off_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			ret = fill_zero(inode, pg_end, 0, off_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		if (pg_start < pg_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			loff_t blk_start, blk_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			f2fs_balance_fs(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			truncate_pagecache_range(inode, blk_start, blk_end - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 			f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
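/*
 * Snapshot the block address of each of the @len blocks starting at @off
 * into @blkaddr[].  Addresses that are not yet checkpointed are detached
 * from the dnode (set to NULL_ADDR) and flagged in @do_replace[] so they
 * can later be re-linked at the destination or rolled back on failure.
 */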
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 				int *do_replace, pgoff_t off, pgoff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	int ret, done, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) next_dnode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	if (ret && ret != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	} else if (ret == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		if (dn.max_level == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 						dn.ofs_in_node, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		blkaddr += done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		do_replace += done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 							dn.ofs_in_node, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		*blkaddr = f2fs_data_blkaddr(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		if (__is_valid_data_blkaddr(*blkaddr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 					DATA_GENERIC_ENHANCE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			if (f2fs_lfs_mode(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 				return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			/* do not invalidate this block address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 			*do_replace = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	len -= done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	off += done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	if (len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		goto next_dnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 				int *do_replace, pgoff_t off, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		if (*do_replace == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			dec_valid_block_count(sbi, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			f2fs_invalidate_blocks(sbi, *blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			f2fs_update_data_blkaddr(&dn, *blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
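/*
 * Attach the blocks recorded by __read_out_blkaddrs() to @dst_inode:
 * entries flagged in @do_replace[] are re-linked via f2fs_replace_block(),
 * while still-checkpointed blocks are copied page by page and then punched
 * out of @src_inode.
 */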
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			block_t *blkaddr, int *do_replace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	pgoff_t i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	while (i < len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		if (blkaddr[i] == NULL_ADDR && !full) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 			struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 			struct node_info ni;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			size_t new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			pgoff_t ilen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 			ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 				f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 			ilen = min((pgoff_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 						dn.ofs_in_node, len - i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 				f2fs_truncate_data_blocks_range(&dn, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 				if (do_replace[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 					f2fs_i_blocks_write(src_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 							1, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 					f2fs_i_blocks_write(dst_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 							1, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 					blkaddr[i], ni.version, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 					do_replace[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 				dn.ofs_in_node++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 				i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 				if (dst_inode->i_size < new_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 					f2fs_i_size_write(dst_inode, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 			struct page *psrc, *pdst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			psrc = f2fs_get_lock_data_page(src_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 							src + i, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			if (IS_ERR(psrc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 				return PTR_ERR(psrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 								true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			if (IS_ERR(pdst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 				f2fs_put_page(psrc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 				return PTR_ERR(pdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 			f2fs_copy_page(psrc, pdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 			set_page_dirty(pdst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 			f2fs_put_page(pdst, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 			f2fs_put_page(psrc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 			ret = f2fs_truncate_hole(src_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 						src + i, src + i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
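/*
 * Exchange @len blocks between @src and @dst, processing at most
 * 4 * ADDRS_PER_BLOCK() entries per pass so the temporary address and
 * do_replace arrays stay bounded; partially moved addresses are rolled
 * back if a pass fails.
 */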
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) static int __exchange_data_block(struct inode *src_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			pgoff_t len, bool full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	block_t *src_blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	int *do_replace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	pgoff_t olen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 					array_size(olen, sizeof(block_t)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 					GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		if (!src_blkaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 					array_size(olen, sizeof(int)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 					GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		if (!do_replace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			kvfree(src_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 					do_replace, src, olen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			goto roll_back;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 					do_replace, src, dst, olen, full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			goto roll_back;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		src += olen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		dst += olen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		len -= olen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		kvfree(src_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		kvfree(do_replace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) roll_back:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	kvfree(src_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	kvfree(do_replace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
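/*
 * Core of FALLOC_FL_COLLAPSE_RANGE: with GC and checkpointing locked out,
 * shift every block that follows the collapsed range down to @offset by
 * exchanging blocks within the same inode.
 */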
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	pgoff_t start = offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	pgoff_t end = (offset + len) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	f2fs_balance_fs(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	/* avoid gc operation during block exchange */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	f2fs_drop_extent_tree(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	truncate_pagecache(inode, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	loff_t new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	if (offset + len >= i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	/* collapse range should be aligned to block size of f2fs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	ret = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	/* write out all dirty pages from offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	ret = f2fs_do_collapse(inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	/* write out all moved pages, if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	truncate_pagecache(inode, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	new_size = i_size_read(inode) - len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	ret = f2fs_truncate_blocks(inode, new_size, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		f2fs_i_size_write(inode, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
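/*
 * Reserve blocks for [start, end) in the current dnode and mark each entry
 * as NEW_ADDR (allocated but unwritten); any existing on-disk block in the
 * range is invalidated first.
 */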
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 								pgoff_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	pgoff_t index = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	unsigned int ofs_in_node = dn->ofs_in_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	blkcnt_t count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	for (; index < end; index++, dn->ofs_in_node++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	dn->ofs_in_node = ofs_in_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	ret = f2fs_reserve_new_blocks(dn, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	dn->ofs_in_node = ofs_in_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	for (index = start; index < end; index++, dn->ofs_in_node++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		dn->data_blkaddr = f2fs_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		 * f2fs_reserve_new_blocks() does not guarantee that every
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		 * requested block was actually allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		if (dn->data_blkaddr == NULL_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		if (dn->data_blkaddr != NEW_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			dn->data_blkaddr = NEW_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 			f2fs_set_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	f2fs_update_extent_cache_range(dn, start, 0, index - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 								int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	pgoff_t index, pg_start, pg_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	loff_t new_size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	loff_t off_start, off_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	ret = inode_newsize_ok(inode, (len + offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	ret = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	off_start = offset & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	off_end = (offset + len) & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	if (pg_start == pg_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		ret = fill_zero(inode, pg_start, off_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 						off_end - off_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		new_size = max_t(loff_t, new_size, offset + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		if (off_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			ret = fill_zero(inode, pg_start++, off_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 						PAGE_SIZE - off_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			new_size = max_t(loff_t, new_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 					(loff_t)pg_start << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		for (index = pg_start; index < pg_end;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			unsigned int end_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			pgoff_t end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 			f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 			truncate_pagecache_range(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 				(loff_t)index << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 				((loff_t)pg_end << PAGE_SHIFT) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 			f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 				f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 				f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 			end = min(pg_end, end_offset - dn.ofs_in_node + index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			ret = f2fs_do_zero_range(&dn, index, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			f2fs_balance_fs(sbi, dn.node_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			index = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			new_size = max_t(loff_t, new_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 					(loff_t)index << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		if (off_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			ret = fill_zero(inode, pg_end, 0, off_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			new_size = max_t(loff_t, new_size, offset + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (new_size > i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		if (mode & FALLOC_FL_KEEP_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 			file_set_keep_isize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			f2fs_i_size_write(inode, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
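/*
 * FALLOC_FL_INSERT_RANGE: shift the blocks at and beyond @offset upwards by
 * @len, working backwards from the end of file in chunks of at most @delta
 * blocks, then extend i_size by @len.
 */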
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	pgoff_t nr, pg_start, pg_end, delta, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	loff_t new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	new_size = i_size_read(inode) + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	ret = inode_newsize_ok(inode, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	if (offset >= i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	/* insert range should be aligned to block size of f2fs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	ret = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	f2fs_balance_fs(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	/* write out all dirty pages from offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	pg_start = offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	pg_end = (offset + len) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	delta = pg_end - pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	/* avoid gc operation during block exchange */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	truncate_pagecache(inode, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
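	/*
	 * Shift the existing blocks toward the end of the file, working from
	 * the last page backwards so that no source block is overwritten
	 * before it has been exchanged.
	 */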
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	while (!ret && idx > pg_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		nr = idx - pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		if (nr > delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 			nr = delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		idx -= nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		f2fs_drop_extent_tree(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		ret = __exchange_data_block(inode, inode, idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 					idx + delta, nr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	/* write out all moved pages, if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	truncate_pagecache(inode, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		f2fs_i_size_write(inode, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) static int expand_inode_data(struct inode *inode, loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 					loff_t len, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 			.m_may_create = true };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	pgoff_t pg_start, pg_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	loff_t new_size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	loff_t off_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	block_t expanded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	err = inode_newsize_ok(inode, (len + offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	err = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	f2fs_balance_fs(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	off_end = (offset + len) & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	map.m_lblk = pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	map.m_len = pg_end - pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	if (off_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		map.m_len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	if (!map.m_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
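	/*
	 * Pinned files are preallocated one section at a time so their blocks
	 * land in dedicated cold-pinned segments; if free sections run short,
	 * foreground GC is invoked before each allocation.
	 */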
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	if (f2fs_is_pinned_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		block_t sec_blks = BLKS_PER_SEC(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		block_t sec_len = roundup(map.m_len, sec_blks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		map.m_len = sec_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) next_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		if (has_not_enough_free_secs(sbi, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 			f2fs_down_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			if (err && err != -ENODATA && err != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 				goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		f2fs_down_write(&sbi->pin_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		f2fs_up_write(&sbi->pin_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		expanded += map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		sec_len -= map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		map.m_lblk += map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		if (!err && sec_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			goto next_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		map.m_len = expanded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		expanded = map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		pgoff_t last_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		if (!expanded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		last_off = pg_start + expanded - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		/* update new size to the failed position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		new_size = (last_off == pg_end) ? offset + len :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 					(loff_t)(last_off + 1) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	if (new_size > i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		if (mode & FALLOC_FL_KEEP_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 			file_set_keep_isize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 			f2fs_i_size_write(inode, new_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) static long f2fs_fallocate(struct file *file, int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 				loff_t offset, loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	long ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	if (!f2fs_is_compress_backend_ready(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	/* f2fs only supports ->fallocate for regular files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	if (!S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	if (IS_ENCRYPTED(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	if (f2fs_compressed_file(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			FALLOC_FL_INSERT_RANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	if (mode & FALLOC_FL_PUNCH_HOLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		if (offset >= inode->i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		ret = punch_hole(inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		ret = f2fs_collapse_range(inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	} else if (mode & FALLOC_FL_ZERO_RANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		ret = f2fs_zero_range(inode, offset, len, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		ret = f2fs_insert_range(inode, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		ret = expand_inode_data(inode, offset, len, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		f2fs_mark_inode_dirty_sync(inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	trace_f2fs_fallocate(inode, mode, offset, len, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static int f2fs_release_file(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	 * f2fs_release_file is called on every close. So we should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	 * drop any in-memory pages on a close issued by another process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	if (!(filp->f_mode & FMODE_WRITE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 			atomic_read(&inode->i_writecount) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	/* any remaining atomic pages should be discarded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	if (f2fs_is_atomic_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		f2fs_drop_inmem_pages(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	if (f2fs_is_volatile_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		set_inode_flag(inode, FI_DROP_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		filemap_fdatawrite(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		clear_inode_flag(inode, FI_DROP_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		clear_inode_flag(inode, FI_VOLATILE_FILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		stat_dec_volatile_write(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) static int f2fs_file_flush(struct file *file, fl_owner_t id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	 * If the process doing a transaction crashes, we should roll it back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	 * Otherwise, other readers/writers can see a corrupted database until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	 * all the writers close their files. Since this should be done before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	 * dropping the file lock, it has to be done in ->flush.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	if (f2fs_is_atomic_file(inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 			F2FS_I(inode)->inmem_task == current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		f2fs_drop_inmem_pages(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	u32 masked_flags = fi->i_flags & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	/* mask can be shrunk by flags_valid selector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	iflags &= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	/* Is it a quota file? Do not allow the user to mess with it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if (IS_NOQUOTA(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 			return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		if (!f2fs_empty_dir(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			return -ENOTEMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
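	/*
	 * Compression flags require filesystem-level compression support,
	 * and F2FS_COMPR_FL and F2FS_NOCOMP_FL must not be set together.
	 */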
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		if (masked_flags & F2FS_COMPR_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			if (!f2fs_disable_compressed_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		if (iflags & F2FS_NOCOMP_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		if (iflags & F2FS_COMPR_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 			if (!f2fs_may_compress(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			if (S_ISREG(inode->i_mode) && inode->i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			set_compress_context(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		if (masked_flags & F2FS_COMPR_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	fi->i_flags = iflags | (fi->i_flags & ~mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 					(fi->i_flags & F2FS_NOCOMP_FL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		set_inode_flag(inode, FI_PROJ_INHERIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		clear_inode_flag(inode, FI_PROJ_INHERIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	f2fs_set_inode_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	f2fs_mark_inode_dirty_sync(inode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	u32 iflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	u32 fsflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) } f2fs_fsflags_map[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	{ F2FS_COMPR_FL,	FS_COMPR_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	{ F2FS_SYNC_FL,		FS_SYNC_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	{ F2FS_APPEND_FL,	FS_APPEND_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	{ F2FS_INDEX_FL,	FS_INDEX_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) #define F2FS_GETTABLE_FS_FL (		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		FS_COMPR_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		FS_SYNC_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		FS_IMMUTABLE_FL |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		FS_APPEND_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		FS_NODUMP_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		FS_NOATIME_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		FS_NOCOMP_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		FS_INDEX_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		FS_DIRSYNC_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		FS_PROJINHERIT_FL |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		FS_ENCRYPT_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		FS_INLINE_DATA_FL |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		FS_NOCOW_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 		FS_VERITY_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		FS_CASEFOLD_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) #define F2FS_SETTABLE_FS_FL (		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		FS_COMPR_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		FS_SYNC_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		FS_IMMUTABLE_FL |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		FS_APPEND_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		FS_NODUMP_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		FS_NOATIME_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		FS_NOCOMP_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 		FS_DIRSYNC_FL |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		FS_PROJINHERIT_FL |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		FS_CASEFOLD_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	u32 fsflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		if (iflags & f2fs_fsflags_map[i].iflag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 			fsflags |= f2fs_fsflags_map[i].fsflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	return fsflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	u32 iflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		if (fsflags & f2fs_fsflags_map[i].fsflag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			iflags |= f2fs_fsflags_map[i].iflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	return iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	if (IS_ENCRYPTED(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		fsflags |= FS_ENCRYPT_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	if (IS_VERITY(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		fsflags |= FS_VERITY_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		fsflags |= FS_INLINE_DATA_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	if (is_inode_flag_set(inode, FI_PIN_FILE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		fsflags |= FS_NOCOW_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	fsflags &= F2FS_GETTABLE_FS_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	return put_user(fsflags, (int __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	u32 fsflags, old_fsflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	u32 iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	if (!inode_owner_or_capable(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	if (get_user(fsflags, (int __user *)arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	if (fsflags & ~F2FS_GETTABLE_FS_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	fsflags &= F2FS_SETTABLE_FS_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	iflags = f2fs_fsflags_to_iflags(fsflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	ret = f2fs_setflags_common(inode, iflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	return put_user(inode->i_generation, (int __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) static int f2fs_ioc_start_atomic_write(struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	if (!inode_owner_or_capable(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	if (!S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	if (filp->f_flags & O_DIRECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	if (!f2fs_disable_compressed_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	if (f2fs_is_atomic_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	ret = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	 * We should wait for end_io so that F2FS_WB_CP_DATA is counted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	 * correctly by f2fs_is_atomic_file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	if (get_dirty_pages(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 			  inode->i_ino, get_dirty_pages(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	if (list_empty(&fi->inmem_ilist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	sbi->atomic_files++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	/* add the inode to inmem_list first, then set FI_ATOMIC_FILE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	set_inode_flag(inode, FI_ATOMIC_FILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	F2FS_I(inode)->inmem_task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	stat_update_max_atomic_write(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) static int f2fs_ioc_commit_atomic_write(struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	if (!inode_owner_or_capable(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	f2fs_balance_fs(F2FS_I_SB(inode), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	if (f2fs_is_volatile_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
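	/*
	 * Write back the in-memory (staged) pages and fsync the result;
	 * the staged pages are only dropped once both steps succeed.
	 */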
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	if (f2fs_is_atomic_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		ret = f2fs_commit_inmem_pages(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 			goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 			f2fs_drop_inmem_pages(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) static int f2fs_ioc_start_volatile_write(struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	if (!inode_owner_or_capable(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	if (!S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	if (f2fs_is_volatile_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	ret = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	stat_inc_volatile_write(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	stat_update_max_volatile_write(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	set_inode_flag(inode, FI_VOLATILE_FILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) static int f2fs_ioc_release_volatile_write(struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	if (!inode_owner_or_capable(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	if (!f2fs_is_volatile_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
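	/*
	 * If the first block never reached disk, just drop its cached page;
	 * otherwise punch out that block.
	 */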
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	if (!f2fs_is_first_block_written(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		ret = truncate_partial_data_page(inode, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) static int f2fs_ioc_abort_volatile_write(struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	if (!inode_owner_or_capable(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
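	/* despite its name, this ioctl also aborts a pending atomic write */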
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	if (f2fs_is_atomic_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		f2fs_drop_inmem_pages(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	if (f2fs_is_volatile_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		clear_inode_flag(inode, FI_VOLATILE_FILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		stat_dec_volatile_write(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	struct super_block *sb = sbi->sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	__u32 in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	if (get_user(in, (__u32 __user *)arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	if (in != F2FS_GOING_DOWN_FULLSYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 			if (ret == -EROFS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 				f2fs_stop_checkpoint(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 				trace_f2fs_shutdown(sbi, in, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
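	/*
	 * FULLSYNC freezes the bdev around stopping checkpoints; METASYNC
	 * issues a final checkpoint before stopping them; NOSYNC stops them
	 * immediately; METAFLUSH only flushes dirty meta pages first.
	 * NEED_FSCK just marks the fs for fsck, checkpoints, and leaves
	 * checkpointing and the GC/discard threads running.
	 */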
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	switch (in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	case F2FS_GOING_DOWN_FULLSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		ret = freeze_bdev(sb->s_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		f2fs_stop_checkpoint(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		thaw_bdev(sb->s_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	case F2FS_GOING_DOWN_METASYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 		/* do checkpoint only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		ret = f2fs_sync_fs(sb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		f2fs_stop_checkpoint(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	case F2FS_GOING_DOWN_NOSYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		f2fs_stop_checkpoint(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	case F2FS_GOING_DOWN_METAFLUSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		f2fs_stop_checkpoint(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	case F2FS_GOING_DOWN_NEED_FSCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		set_sbi_flag(sbi, SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		set_sbi_flag(sbi, SBI_IS_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		/* do checkpoint only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		ret = f2fs_sync_fs(sb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	f2fs_stop_gc_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	f2fs_stop_discard_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	f2fs_drop_discard_cmd(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	clear_opt(sbi, DISCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	f2fs_update_time(sbi, REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	if (in != F2FS_GOING_DOWN_FULLSYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	trace_f2fs_shutdown(sbi, in, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	struct request_queue *q = bdev_get_queue(sb->s_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	struct fstrim_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 				sizeof(range)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	range.minlen = max((unsigned int)range.minlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 				q->limits.discard_granularity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	if (copy_to_user((struct fstrim_range __user *)arg, &range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 				sizeof(range)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) static bool uuid_is_nonzero(__u8 u[16])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		if (u[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	if (!f2fs_sb_has_encrypt(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	err = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	f2fs_down_write(&sbi->sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 		goto got_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	/* update superblock with uuid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	err = f2fs_commit_super(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 		/* undo new data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) got_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 									16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	f2fs_up_write(&sbi->sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 					     unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 						    unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) static int f2fs_ioc_get_encryption_key_status(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 					      unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 
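/*
 * F2FS_IOC_GARBAGE_COLLECT: trigger one foreground GC pass. A zero argument
 * makes the call non-blocking (trylock on gc_lock, -EBUSY if GC is already
 * running); a non-zero argument waits for the lock and requests sync GC.
 *
 * A minimal userspace sketch (assuming the uapi definitions from
 * <uapi/linux/f2fs.h> and an open fd on an f2fs file):
 *
 *	__u32 sync = 1;
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 */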
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	__u32 sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	if (get_user(sync, (__u32 __user *)arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	if (f2fs_readonly(sbi->sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	if (!sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 		f2fs_down_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 
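/*
 * Worker for F2FS_IOC_GARBAGE_COLLECT_RANGE: validates the caller-supplied
 * block range against the main area and then garbage-collects it one section
 * at a time, advancing range->start by BLKS_PER_SEC() per iteration.
 */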
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	u64 end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	if (f2fs_readonly(sbi->sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	end = range->start + range->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 					end >= MAX_BLKADDR(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) do_more:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	if (!range->sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		f2fs_down_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	ret = f2fs_gc(sbi, range->sync, true, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 				GET_SEGNO(sbi, range->start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		if (ret == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 			ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	range->start += BLKS_PER_SEC(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	if (range->start <= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		goto do_more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	struct f2fs_gc_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 							sizeof(range)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	return __f2fs_ioc_gc_range(filp, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 
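/*
 * F2FS_IOC_WRITE_CHECKPOINT: force a synchronous checkpoint via
 * f2fs_sync_fs(sb, 1). Rejected on read-only mounts and while checkpointing
 * is disabled (SBI_CP_DISABLED).
 */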
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	if (f2fs_readonly(sbi->sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	ret = f2fs_sync_fs(sbi->sb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
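/*
 * Core of F2FS_IOC_DEFRAGMENT. After writing back the requested range, the
 * block map is scanned; if the extents are already physically contiguous the
 * call returns early. Otherwise the data pages are re-dirtied in
 * segment-sized batches so that writeback reallocates them contiguously.
 * On success, range->len is updated to the number of bytes moved.
 */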
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 					struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 					struct f2fs_defragment *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 					.m_seg_type = NO_CHECK_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 					.m_may_create = false };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	struct extent_info ei = {0, 0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	pgoff_t pg_start, pg_end, next_pgofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	unsigned int blk_per_seg = sbi->blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	unsigned int total = 0, sec_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	block_t blk_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	bool fragmented = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	/* if in-place-update policy is enabled, don't waste time here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	if (f2fs_should_update_inplace(inode, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	pg_start = range->start >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	pg_end = (range->start + range->len) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	f2fs_balance_fs(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	/* writeback all dirty pages in the range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 						range->start + range->len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	 * look up mapping info in the extent cache and skip defragmenting if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	 * the physical block addresses are contiguous.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 		if (ei.fofs + ei.len >= pg_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	map.m_lblk = pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	map.m_next_pgofs = &next_pgofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	 * look up mapping info in the dnode page cache and skip defragmenting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	 * if all physical block addresses are contiguous, even if there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	 * hole(s) in the logical blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	while (map.m_lblk < pg_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 		map.m_len = pg_end - map.m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 			map.m_lblk = next_pgofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 		if (blk_end && blk_end != map.m_pblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 			fragmented = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 		/* record total count of blocks that we're going to move */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 		total += map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 		blk_end = map.m_pblk + map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 		map.m_lblk += map.m_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	if (!fragmented) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 		total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	 * make sure there are enough free sections for LFS allocation; this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	 * avoids the defragment running in SSR mode when free sections are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	 * being allocated intensively
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 		err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	map.m_lblk = pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	map.m_len = pg_end - pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	while (map.m_lblk < pg_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		pgoff_t idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		int cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) do_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		map.m_len = pg_end - map.m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 			goto clear_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 			map.m_lblk = next_pgofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 			goto check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 		set_inode_flag(inode, FI_DO_DEFRAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 		idx = map.m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 			struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 			page = f2fs_get_lock_data_page(inode, idx, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 			if (IS_ERR(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 				err = PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 				goto clear_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 			set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 			f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 			idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 			cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 			total++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 		map.m_lblk = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 		if (map.m_lblk < pg_end && cnt < blk_per_seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 			goto do_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 		clear_inode_flag(inode, FI_DO_DEFRAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 		err = filemap_fdatawrite(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) clear_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	clear_inode_flag(inode, FI_DO_DEFRAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		range->len = (u64)total << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	struct f2fs_defragment range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 	if (f2fs_readonly(sbi->sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 							sizeof(range)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	/* verify alignment of offset & size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 					max_file_blocks(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	err = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	err = f2fs_defragment_range(sbi, filp, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	f2fs_update_time(sbi, REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 							sizeof(range)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 
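/*
 * Block-granularity data move between two regular f2fs files on the same
 * mount. Both inodes are locked, inline data is converted, dirty pages are
 * flushed, and the blocks are exchanged under f2fs_lock_op() via
 * __exchange_data_block(). Offsets and length must be block aligned.
 */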
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 			struct file *file_out, loff_t pos_out, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	struct inode *src = file_inode(file_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	struct inode *dst = file_inode(file_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	size_t olen = len, dst_max_i_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	size_t dst_osize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	if (file_in->f_path.mnt != file_out->f_path.mnt ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 				src->i_sb != dst->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		return -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	if (unlikely(f2fs_readonly(src->i_sb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	if (pos_out < 0 || pos_in < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	if (src == dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 		if (pos_in == pos_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 		if (pos_out > pos_in && pos_out < pos_in + len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	inode_lock(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 	if (src != dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		if (!inode_trylock(dst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	if (pos_in + len > src->i_size || pos_in + len < pos_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 		olen = len = src->i_size - pos_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	if (pos_in + len == src->i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	if (len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	dst_osize = dst->i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	if (pos_out + olen > dst->i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 		dst_max_i_size = pos_out + olen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	/* verify the end result is block aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	ret = f2fs_convert_inline_inode(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	ret = f2fs_convert_inline_inode(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	/* write out all dirty pages from offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	ret = filemap_write_and_wait_range(src->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 					pos_in, pos_in + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	ret = filemap_write_and_wait_range(dst->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 					pos_out, pos_out + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	f2fs_balance_fs(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	if (src != dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 			goto out_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 				pos_out >> F2FS_BLKSIZE_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 				len >> F2FS_BLKSIZE_BITS, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		if (dst_max_i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 			f2fs_i_size_write(dst, dst_max_i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		else if (dst_osize != dst->i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 			f2fs_i_size_write(dst, dst_osize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	if (src != dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) out_src:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	if (src != dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 		inode_unlock(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	inode_unlock(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) static int __f2fs_ioc_move_range(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 				struct f2fs_move_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	struct fd dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	if (!(filp->f_mode & FMODE_READ) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 			!(filp->f_mode & FMODE_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 		return -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	dst = fdget(range->dst_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	if (!dst.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 		return -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	if (!(dst.file->f_mode & FMODE_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 		err = -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 	err = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 					range->pos_out, range->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	fdput(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 
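/*
 * F2FS_IOC_MOVE_RANGE ioctl entry point. A minimal userspace sketch
 * (assuming the struct f2fs_move_range layout from <uapi/linux/f2fs.h>):
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd,	// destination file descriptor
 *		.pos_in = 0,		// offset in the source file
 *		.pos_out = 0,		// offset in the destination file
 *		.len = 1 << 22,		// bytes to move (block aligned)
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */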
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	struct f2fs_move_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 							sizeof(range)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	return __f2fs_ioc_move_range(filp, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 
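/*
 * F2FS_IOC_FLUSH_DEVICE: on a multi-device filesystem, migrate data off the
 * device named by range.dev_num by running foreground GC over up to
 * range.segments of its segments. Not supported with large sections.
 */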
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	struct sit_info *sm = SIT_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	unsigned int start_segno = 0, end_segno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	unsigned int dev_start_segno = 0, dev_end_segno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	struct f2fs_flush_device range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	if (f2fs_readonly(sbi->sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 							sizeof(range)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 			__is_large_section(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	if (range.dev_num != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	start_segno = sm->last_victim[FLUSH_DEVICE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 		start_segno = dev_start_segno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	end_segno = min(start_segno + range.segments, dev_end_segno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	while (start_segno < end_segno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 		sm->last_victim[GC_CB] = end_segno + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 		sm->last_victim[GC_GREEDY] = end_segno + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 		ret = f2fs_gc(sbi, true, true, true, start_segno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 		if (ret == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 		start_segno++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	/* Must validate to set it with SQLite behavior in Android. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	return put_user(sb_feature, (u32 __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 
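/*
 * Project quota plumbing used by FS_IOC_FSSETXATTR. With CONFIG_QUOTA
 * disabled, the stubs below accept only the default project ID.
 */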
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	struct dquot *transfer_to[MAXQUOTAS] = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	struct super_block *sb = sbi->sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	if (!IS_ERR(transfer_to[PRJQUOTA])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 		err = __dquot_transfer(inode, transfer_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 		dqput(transfer_to[PRJQUOTA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	struct page *ipage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	kprojid_t kprojid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	if (!f2fs_sb_has_project_quota(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 		if (projid != F2FS_DEF_PROJID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 			return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	if (!f2fs_has_extra_attr(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 	err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	/* Is it a quota file? Do not allow the user to mess with it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 	if (IS_NOQUOTA(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 	if (IS_ERR(ipage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 		return PTR_ERR(ipage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 								i_projid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		err = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 		f2fs_put_page(ipage, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 	f2fs_put_page(ipage, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	err = dquot_initialize(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	err = f2fs_transfer_project_quota(inode, kprojid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	F2FS_I(inode)->i_projid = kprojid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 	f2fs_mark_inode_dirty_sync(inode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	if (projid != F2FS_DEF_PROJID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)  * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)  * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)  * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 	u32 iflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	u32 xflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) } f2fs_xflags_map[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) #define F2FS_SUPPORTED_XFLAGS (		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 		FS_XFLAG_SYNC |		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 		FS_XFLAG_IMMUTABLE |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 		FS_XFLAG_APPEND |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 		FS_XFLAG_NODUMP |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		FS_XFLAG_NOATIME |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		FS_XFLAG_PROJINHERIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) static inline u32 f2fs_iflags_to_xflags(u32 iflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	u32 xflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		if (iflags & f2fs_xflags_map[i].iflag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 			xflags |= f2fs_xflags_map[i].xflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	return xflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) static inline u32 f2fs_xflags_to_iflags(u32 xflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	u32 iflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		if (xflags & f2fs_xflags_map[i].xflag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 			iflags |= f2fs_xflags_map[i].iflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	return iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 	struct fsxattr fa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	f2fs_fill_fsxattr(inode, &fa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	struct fsxattr fa, old_fa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	u32 iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	/* Make sure caller has proper permission */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 	if (!inode_owner_or_capable(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	err = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 	f2fs_fill_fsxattr(inode, &old_fa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 	err = f2fs_setflags_common(inode, iflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) }
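
/*
 * Userspace sketch (illustrative): the two handlers above back the generic
 * FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR ioctls from <linux/fs.h>.  A caller
 * normally reads the current fsxattr, modifies it, and writes it back, e.g.
 * to move a file into project-quota project 42 on a filesystem created with
 * the project_quota feature:
 *
 *	struct fsxattr fa;
 *
 *	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
 *		perror("FS_IOC_FSGETXATTR");
 *	fa.fsx_projid = 42;
 *	if (ioctl(fd, FS_IOC_FSSETXATTR, &fa) < 0)
 *		perror("FS_IOC_FSSETXATTR");
 */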
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) int f2fs_pin_file_control(struct inode *inode, bool inc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 	/* Use i_gc_failures for normal file as a risk signal. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 	if (inc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 		f2fs_i_gc_failures_write(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 				fi->i_gc_failures[GC_FAILURE_PIN] + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 			  __func__, inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 			  fi->i_gc_failures[GC_FAILURE_PIN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 		clear_inode_flag(inode, FI_PIN_FILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	__u32 pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 	if (get_user(pin, (__u32 __user *)arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	if (!S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	if (!pin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 		clear_inode_flag(inode, FI_PIN_FILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		f2fs_i_gc_failures_write(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	if (f2fs_should_update_outplace(inode, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	if (f2fs_pin_file_control(inode, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 		ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	ret = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	if (!f2fs_disable_compressed_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 		ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	set_inode_flag(inode, FI_PIN_FILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 	__u32 pin = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 	if (is_inode_flag_set(inode, FI_PIN_FILE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 	return put_user(pin, (u32 __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) }
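
/*
 * Userspace sketch: pinning keeps a regular file's blocks in place across GC
 * until too many GC trials fail (see f2fs_pin_file_control() above).  With
 * the ioctls from <linux/f2fs.h>; note that SET returns the current
 * GC-failure count on success, so only a negative return is an error:
 *
 *	__u32 pin = 1;
 *
 *	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
 *		perror("F2FS_IOC_SET_PIN_FILE");
 *	if (ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin) < 0)
 *		perror("F2FS_IOC_GET_PIN_FILE");
 *	pin = 0;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 */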
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) int f2fs_precache_extents(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	struct f2fs_map_blocks map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 	pgoff_t m_next_extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	loff_t end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 	map.m_lblk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	map.m_next_pgofs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 	map.m_next_extent = &m_next_extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 	map.m_seg_type = NO_CHECK_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	map.m_may_create = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 	end = max_file_blocks(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	while (map.m_lblk < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 		map.m_len = end - map.m_lblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 		f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 		map.m_lblk = m_next_extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 	return f2fs_precache_extents(file_inode(filp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) }
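
/*
 * Usage note (sketch): F2FS_IOC_PRECACHE_EXTENTS takes no argument; it walks
 * the whole file with F2FS_GET_BLOCK_PRECACHE so the extent cache is
 * populated up front:
 *
 *	if (ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS) < 0)
 *		perror("F2FS_IOC_PRECACHE_EXTENTS");
 */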
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	__u64 block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 	if (f2fs_readonly(sbi->sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	if (copy_from_user(&block_count, (void __user *)arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 			   sizeof(block_count)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	return f2fs_resize_fs(sbi, block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) }
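
/*
 * Usage note (sketch, CAP_SYS_ADMIN required): F2FS_IOC_RESIZE_FS passes the
 * requested total block count through a __u64.  Assuming the default 4 KiB
 * block size, with target_bytes standing in for the desired size:
 *
 *	__u64 blocks = target_bytes / 4096;
 *
 *	if (ioctl(fd, F2FS_IOC_RESIZE_FS, &blocks) < 0)
 *		perror("F2FS_IOC_RESIZE_FS");
 */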
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 		f2fs_warn(F2FS_I_SB(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 			  inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 	return fsverity_ioctl_enable(filp, (const void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 	return fsverity_ioctl_measure(filp, (void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) }
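
/*
 * Userspace sketch (see Documentation/filesystems/fsverity.rst): the three
 * wrappers above only gate the generic fs-verity ioctls on the superblock
 * verity feature.  Enabling verity with SHA-256 over 4 KiB blocks, assuming
 * <linux/fsverity.h> is available:
 *
 *	struct fsverity_enable_arg arg = {
 *		.version = 1,
 *		.hash_algorithm = FSVERITY_HASH_ALG_SHA256,
 *		.block_size = 4096,
 *	};
 *
 *	if (ioctl(fd, FS_IOC_ENABLE_VERITY, &arg) < 0)
 *		perror("FS_IOC_ENABLE_VERITY");
 */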
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	char *vbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 	if (!vbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 	f2fs_down_read(&sbi->sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 			ARRAY_SIZE(sbi->raw_super->volume_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	f2fs_up_read(&sbi->sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	if (copy_to_user((char __user *)arg, vbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 				min(FSLABEL_MAX, count)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 	kfree(vbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	char *vbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	if (IS_ERR(vbuf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 		return PTR_ERR(vbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 	err = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	f2fs_down_write(&sbi->sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 	memset(sbi->raw_super->volume_name, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 			sizeof(sbi->raw_super->volume_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 			sbi->raw_super->volume_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 			ARRAY_SIZE(sbi->raw_super->volume_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	err = f2fs_commit_super(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 	f2fs_up_write(&sbi->sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 	kfree(vbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) }
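
/*
 * Userspace sketch: the volume label is exposed through the generic
 * FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL ioctls from <linux/fs.h> with a
 * char[FSLABEL_MAX] buffer; setting it needs CAP_SYS_ADMIN:
 *
 *	char label[FSLABEL_MAX] = "";
 *
 *	if (ioctl(fd, FS_IOC_GETFSLABEL, label) < 0)
 *		perror("FS_IOC_GETFSLABEL");
 *	snprintf(label, sizeof(label), "new-label");
 *	if (ioctl(fd, FS_IOC_SETFSLABEL, label) < 0)
 *		perror("FS_IOC_SETFSLABEL");
 */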
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	__u64 blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 	if (!f2fs_compressed_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	return put_user(blocks, (u64 __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	unsigned int released_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	block_t blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 		blkaddr = data_blkaddr(dn->inode, dn->node_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 						dn->ofs_in_node + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 		if (!__is_valid_data_blkaddr(blkaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 					DATA_GENERIC_ENHANCE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 			return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 		int compr_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 			blkaddr = f2fs_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 			if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 				if (blkaddr == COMPRESS_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 				dn->ofs_in_node += cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 				goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 			if (__is_valid_data_blkaddr(blkaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 				compr_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 			if (blkaddr != NEW_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 			dn->data_blkaddr = NULL_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 			f2fs_set_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 		dec_valid_block_count(sbi, dn->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 					cluster_size - compr_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 		released_blocks += cluster_size - compr_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 		count -= cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 	return released_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 	pgoff_t page_idx = 0, last_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	unsigned int released_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 	int writecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 	if (!f2fs_compressed_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	if (f2fs_readonly(sbi->sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	f2fs_balance_fs(F2FS_I_SB(inode), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	writecount = atomic_read(&inode->i_writecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	set_inode_flag(inode, FI_COMPRESS_RELEASED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	f2fs_mark_inode_dirty_sync(inode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	while (page_idx < last_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 		struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 		pgoff_t end_offset, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 		set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 			if (ret == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 				page_idx = f2fs_get_next_page_offset(&dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 								page_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 		ret = release_compress_blocks(&dn, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 		f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 		page_idx += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 		released_blocks += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 	if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 		ret = put_user(released_blocks, (u64 __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	} else if (released_blocks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 		set_sbi_flag(sbi, SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 			"iblocks=%llu, released=%u, compr_blocks=%u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 			"run fsck to fix.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 			__func__, inode->i_ino, inode->i_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 			released_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 			atomic_read(&F2FS_I(inode)->i_compr_blocks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) }
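
/*
 * Userspace sketch: releasing compressed blocks gives the space saved by
 * compression back to the filesystem as free space and marks the inode
 * FI_COMPRESS_RELEASED until the blocks are reserved again.  Both ioctls
 * report a block count through a __u64:
 *
 *	__u64 blks;
 *
 *	if (ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blks) == 0)
 *		printf("compressed blocks saved: %llu\n",
 *		       (unsigned long long)blks);
 *	if (ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blks) == 0)
 *		printf("released blocks: %llu\n", (unsigned long long)blks);
 */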
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	unsigned int reserved_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	block_t blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 	for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 		blkaddr = data_blkaddr(dn->inode, dn->node_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 						dn->ofs_in_node + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 		if (!__is_valid_data_blkaddr(blkaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 					DATA_GENERIC_ENHANCE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 			return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 	while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 		int compr_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 		blkcnt_t reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 			blkaddr = f2fs_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 			if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 				if (blkaddr == COMPRESS_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 				dn->ofs_in_node += cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 				goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 			if (__is_valid_data_blkaddr(blkaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 				compr_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 			dn->data_blkaddr = NEW_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 			f2fs_set_data_blkaddr(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 		reserved = cluster_size - compr_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 		if (reserved != cluster_size - compr_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 			return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 		reserved_blocks += reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 		count -= cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	return reserved_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	pgoff_t page_idx = 0, last_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	unsigned int reserved_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	if (!f2fs_compressed_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 	if (f2fs_readonly(sbi->sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	ret = mnt_want_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	f2fs_balance_fs(F2FS_I_SB(inode), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 	if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 		goto unlock_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 	while (page_idx < last_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 		struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 		pgoff_t end_offset, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 		set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 			if (ret == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 				page_idx = f2fs_get_next_page_offset(&dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 								page_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 		ret = reserve_compress_blocks(&dn, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 		f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 		page_idx += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 		reserved_blocks += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 	if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 		clear_inode_flag(inode, FI_COMPRESS_RELEASED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 		inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 		f2fs_mark_inode_dirty_sync(inode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) unlock_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 	mnt_drop_write_file(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 	if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 		ret = put_user(reserved_blocks, (u64 __user *)arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 	} else if (reserved_blocks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 		set_sbi_flag(sbi, SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 			"run fsck to fix.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 			__func__, inode->i_ino, inode->i_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 			reserved_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 			atomic_read(&F2FS_I(inode)->i_compr_blocks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) }
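
/*
 * Userspace sketch: the reverse operation re-reserves blocks for a file whose
 * compressed space was released above, clearing FI_COMPRESS_RELEASED; the
 * number of re-reserved blocks comes back through the argument:
 *
 *	__u64 blks;
 *
 *	if (ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blks) == 0)
 *		printf("reserved blocks: %llu\n", (unsigned long long)blks);
 */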
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 		pgoff_t off, block_t block, block_t len, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 	struct request_queue *q = bdev_get_queue(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 	sector_t sector = SECTOR_FROM_BLOCK(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 	sector_t nr_sects = SECTOR_FROM_BLOCK(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 	if (!q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 	if (flags & F2FS_TRIM_FILE_DISCARD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 		ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 						blk_queue_secure_erase(q) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 						BLKDEV_DISCARD_SECURE : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 	if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 		if (IS_ENCRYPTED(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 			ret = fscrypt_zeroout_range(inode, off, block, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 			ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 					GFP_NOFS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 	struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 	struct block_device *prev_bdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 	struct f2fs_sectrim_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 	pgoff_t index, pg_end, prev_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 	block_t prev_block = 0, len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 	loff_t end_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 	bool to_end = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 	if (!(filp->f_mode & FMODE_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 		return -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 				sizeof(range)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 			!S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 			!f2fs_hw_support_discard(sbi)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 	file_start_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 			range.start >= inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 	if (range.len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 	if (inode->i_size - range.start > range.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 		end_addr = range.start + range.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 		end_addr = range.len == (u64)-1 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 			sbi->sb->s_maxbytes : inode->i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 		to_end = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 	index = F2FS_BYTES_TO_BLK(range.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 	ret = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 	ret = filemap_write_and_wait_range(mapping, range.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 			to_end ? LLONG_MAX : end_addr - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 	truncate_inode_pages_range(mapping, range.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 			to_end ? -1 : end_addr - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 	while (index < pg_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 		struct dnode_of_data dn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 		pgoff_t end_offset, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 		set_new_dnode(&dn, inode, NULL, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 			if (ret == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 				index = f2fs_get_next_page_offset(&dn, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 		count = min(end_offset - dn.ofs_in_node, pg_end - index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 			struct block_device *cur_bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 			block_t blkaddr = f2fs_data_blkaddr(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 			if (!__is_valid_data_blkaddr(blkaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 						DATA_GENERIC_ENHANCE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 				ret = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 				f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 			if (f2fs_is_multi_device(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 				int di = f2fs_target_device_index(sbi, blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 				blkaddr -= FDEV(di).start_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 			if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 				if (prev_bdev == cur_bdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 						index == prev_index + len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 						blkaddr == prev_block + len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 					len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 					ret = f2fs_secure_erase(prev_bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 						inode, prev_index, prev_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 						len, range.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 					if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 						f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 						goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 					len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 			if (!len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 				prev_bdev = cur_bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 				prev_index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 				prev_block = blkaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 				len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 		f2fs_put_dnode(&dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 		if (fatal_signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 			ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	if (len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 				prev_block, len, range.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 	file_end_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) }
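
/*
 * Userspace sketch: F2FS_IOC_SEC_TRIM_FILE takes a struct f2fs_sectrim_range
 * with byte-granular, block-aligned start/len (len == (__u64)-1 means "to the
 * end of the file") and a flags mask of F2FS_TRIM_FILE_DISCARD and/or
 * F2FS_TRIM_FILE_ZEROOUT.  The fd must be open for writing:
 *
 *	struct f2fs_sectrim_range range = {
 *		.start = 0,
 *		.len   = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *
 *	if (ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &range) < 0)
 *		perror("F2FS_IOC_SEC_TRIM_FILE");
 */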
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 	struct f2fs_comp_option option;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 	inode_lock_shared(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 	if (!f2fs_compressed_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 		inode_unlock_shared(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 		return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 	option.algorithm = F2FS_I(inode)->i_compress_algorithm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 	option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 	inode_unlock_shared(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 	if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 				sizeof(option)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 
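/*
 * F2FS_IOC_SET_COMPRESS_OPTION: change the inode's compression algorithm
 * and cluster size.  Only allowed on an empty, non-mmapped, non-dirty
 * compressed file opened for writing.
 */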
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 	struct f2fs_comp_option option;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 	if (!f2fs_sb_has_compression(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 	if (!(filp->f_mode & FMODE_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 		return -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 	if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 				sizeof(option)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 	if (!f2fs_compressed_file(inode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 			option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 			option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 			option.algorithm >= COMPRESS_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 	file_start_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 	if (inode->i_size != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 		ret = -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 	f2fs_mark_inode_dirty_sync(inode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 	if (!f2fs_is_compress_backend_ready(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 		f2fs_warn(sbi, "compression algorithm is successfully set, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 			"but current kernel doesn't support this algorithm.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 	file_end_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 
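/*
 * Pull the pages in [page_idx, page_idx + len) back into the page cache
 * and mark each of them dirty again, so the next writeback pass rewrites
 * the whole range.  Used by the compress/decompress ioctls below to push
 * clusters through the write path.
 */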
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 	DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 	struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 	pgoff_t redirty_idx = page_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	int i, page_len = 0, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 	page_cache_ra_unbounded(&ractl, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 	for (i = 0; i < len; i++, page_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 		page = read_cache_page(mapping, page_idx, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 		if (IS_ERR(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 			ret = PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 		page_len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 	for (i = 0; i < page_len; i++, redirty_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 		page = find_lock_page(mapping, redirty_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 		if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 		set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 		f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 		f2fs_put_page(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 
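/*
 * F2FS_IOC_DECOMPRESS_FILE: rewrite every cluster of a compressed file
 * through writeback without FI_ENABLE_COMPRESS, so the data ends up stored
 * uncompressed on disk.  Only supported with compress_mode=user, on a
 * writable, non-mmapped file.
 */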
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 	struct f2fs_inode_info *fi = F2FS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 	pgoff_t page_idx = 0, last_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 	unsigned int blk_per_seg = sbi->blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 	int cluster_size = F2FS_I(inode)->i_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 	int count, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 	if (!f2fs_sb_has_compression(sbi) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 	if (!(filp->f_mode & FMODE_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 		return -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 	if (!f2fs_compressed_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 	f2fs_balance_fs(F2FS_I_SB(inode), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 	file_start_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 	if (!f2fs_is_compress_backend_ready(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 		ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 	if (f2fs_is_mmap_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 	if (!atomic_read(&fi->i_compr_blocks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 	count = last_idx - page_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 	while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 		int len = min(cluster_size, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 		ret = redirty_blocks(inode, page_idx, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 		if (get_dirty_pages(inode) >= blk_per_seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 			filemap_fdatawrite(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 		count -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 		page_idx += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 							LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 		f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 			  __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 	file_end_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 
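/*
 * F2FS_IOC_COMPRESS_FILE: the converse of the ioctl above - set
 * FI_ENABLE_COMPRESS and redirty every cluster so that writeback stores
 * the data in compressed form.
 */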
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 	struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 	pgoff_t page_idx = 0, last_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 	unsigned int blk_per_seg = sbi->blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 	int cluster_size = F2FS_I(inode)->i_cluster_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 	int count, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 	if (!f2fs_sb_has_compression(sbi) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 	if (!(filp->f_mode & FMODE_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 		return -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 	if (!f2fs_compressed_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 	f2fs_balance_fs(F2FS_I_SB(inode), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 	file_start_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 	if (!f2fs_is_compress_backend_ready(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 		ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 	if (f2fs_is_mmap_file(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 	set_inode_flag(inode, FI_ENABLE_COMPRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 	count = last_idx - page_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 	while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 		int len = min(cluster_size, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 		ret = redirty_blocks(inode, page_idx, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 		if (get_dirty_pages(inode) >= blk_per_seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 			filemap_fdatawrite(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 		count -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 		page_idx += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 							LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 	clear_inode_flag(inode, FI_ENABLE_COMPRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 		f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 			  __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 	file_end_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 	case FS_IOC_GETFLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 		return f2fs_ioc_getflags(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 	case FS_IOC_SETFLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 		return f2fs_ioc_setflags(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 	case FS_IOC_GETVERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 		return f2fs_ioc_getversion(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 	case F2FS_IOC_START_ATOMIC_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 		return f2fs_ioc_start_atomic_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 		return f2fs_ioc_commit_atomic_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 	case F2FS_IOC_START_VOLATILE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 		return f2fs_ioc_start_volatile_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 		return f2fs_ioc_release_volatile_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 		return f2fs_ioc_abort_volatile_write(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 	case F2FS_IOC_SHUTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 		return f2fs_ioc_shutdown(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 	case FITRIM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 		return f2fs_ioc_fitrim(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 	case FS_IOC_SET_ENCRYPTION_POLICY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 		return f2fs_ioc_set_encryption_policy(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 	case FS_IOC_GET_ENCRYPTION_POLICY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 		return f2fs_ioc_get_encryption_policy(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 	case FS_IOC_GET_ENCRYPTION_PWSALT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 	case FS_IOC_ADD_ENCRYPTION_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 		return f2fs_ioc_add_encryption_key(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 	case FS_IOC_REMOVE_ENCRYPTION_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 		return f2fs_ioc_remove_encryption_key(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 		return f2fs_ioc_get_encryption_key_status(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 	case FS_IOC_GET_ENCRYPTION_NONCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 		return f2fs_ioc_get_encryption_nonce(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 	case F2FS_IOC_GARBAGE_COLLECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 		return f2fs_ioc_gc(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 		return f2fs_ioc_gc_range(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 	case F2FS_IOC_WRITE_CHECKPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 		return f2fs_ioc_write_checkpoint(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 	case F2FS_IOC_DEFRAGMENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 		return f2fs_ioc_defragment(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	case F2FS_IOC_MOVE_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 		return f2fs_ioc_move_range(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 	case F2FS_IOC_FLUSH_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 		return f2fs_ioc_flush_device(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 	case F2FS_IOC_GET_FEATURES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 		return f2fs_ioc_get_features(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 	case FS_IOC_FSGETXATTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 		return f2fs_ioc_fsgetxattr(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 	case FS_IOC_FSSETXATTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 		return f2fs_ioc_fssetxattr(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 	case F2FS_IOC_GET_PIN_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 		return f2fs_ioc_get_pin_file(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 	case F2FS_IOC_SET_PIN_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 		return f2fs_ioc_set_pin_file(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 	case F2FS_IOC_PRECACHE_EXTENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 		return f2fs_ioc_precache_extents(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 	case F2FS_IOC_RESIZE_FS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 		return f2fs_ioc_resize_fs(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 	case FS_IOC_ENABLE_VERITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 		return f2fs_ioc_enable_verity(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 	case FS_IOC_MEASURE_VERITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 		return f2fs_ioc_measure_verity(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	case FS_IOC_READ_VERITY_METADATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 		return f2fs_ioc_read_verity_metadata(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 	case FS_IOC_GETFSLABEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 		return f2fs_ioc_getfslabel(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 	case FS_IOC_SETFSLABEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 		return f2fs_ioc_setfslabel(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 	case F2FS_IOC_GET_COMPRESS_BLOCKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 		return f2fs_get_compress_blocks(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 		return f2fs_release_compress_blocks(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 		return f2fs_reserve_compress_blocks(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 	case F2FS_IOC_SEC_TRIM_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 		return f2fs_sec_trim_file(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 	case F2FS_IOC_GET_COMPRESS_OPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 		return f2fs_ioc_get_compress_option(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 	case F2FS_IOC_SET_COMPRESS_OPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 		return f2fs_ioc_set_compress_option(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 	case F2FS_IOC_DECOMPRESS_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 		return f2fs_ioc_decompress_file(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 	case F2FS_IOC_COMPRESS_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 		return f2fs_ioc_compress_file(filp, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 		return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 	return __f2fs_ioctl(filp, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 	struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 	struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 	if (!f2fs_is_compress_backend_ready(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 	ret = generic_file_read_iter(iocb, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 	if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) 		f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 
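/*
 * Write path: take the inode lock (trylock under IOCB_NOWAIT), refuse
 * writes to immutable inodes and to files whose compressed blocks were
 * released, preallocate blocks where allowed, and trim any preallocation
 * left past i_size if the write comes up short.
 */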
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 	struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 	struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 	if (!f2fs_is_compress_backend_ready(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 		ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 	if (iocb->ki_flags & IOCB_NOWAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 		if (!inode_trylock(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 			ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 		inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 	if (unlikely(IS_IMMUTABLE(inode))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 		ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) 	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 		ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 	ret = generic_write_checks(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 	if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 		bool preallocated = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 		size_t target_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 			set_inode_flag(inode, FI_NO_PREALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 		if ((iocb->ki_flags & IOCB_NOWAIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 						iov_iter_count(from)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 				f2fs_has_inline_data(inode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 				f2fs_force_buffered_io(inode, iocb, from)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 				clear_inode_flag(inode, FI_NO_PREALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 				inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 				ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 			goto write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 			goto write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 		if (iocb->ki_flags & IOCB_DIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 			 * Convert inline data for Direct I/O before entering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 			 * f2fs_direct_IO().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 			err = f2fs_convert_inline_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 				goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 			 * If f2fs_force_buffered_io() is true, we have to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 			 * blocks all the time, since f2fs_direct_IO will fall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 			 * back to buffered IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 			if (!f2fs_force_buffered_io(inode, iocb, from) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 					allow_outplace_dio(inode, iocb, from))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 				goto write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 		preallocated = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 		target_size = iocb->ki_pos + iov_iter_count(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 		err = f2fs_preallocate_blocks(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) 			clear_inode_flag(inode, FI_NO_PREALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) 			inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 			ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) 		ret = __generic_file_write_iter(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 		clear_inode_flag(inode, FI_NO_PREALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 		/* if we couldn't write data, we should deallocate blocks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 		if (preallocated && i_size_read(inode) < target_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 			f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 			f2fs_truncate(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 			f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 		if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) 					iov_iter_count(from), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 	if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) 		ret = generic_write_sync(iocb, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) #ifdef CONFIG_COMPAT
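/*
 * 32-bit layouts of the GC-range and move-range arguments: u64 members
 * become compat_u64 so the structures match what 32-bit userspace passes,
 * and the helpers below repack them into the native structs before calling
 * the common handlers.
 */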
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) struct compat_f2fs_gc_range {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 	u32 sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 	compat_u64 start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 	compat_u64 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) #define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 						struct compat_f2fs_gc_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) 	struct compat_f2fs_gc_range __user *urange;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 	struct f2fs_gc_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) 	urange = compat_ptr(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 	err = get_user(range.sync, &urange->sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 	err |= get_user(range.start, &urange->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 	err |= get_user(range.len, &urange->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) 	return __f2fs_ioc_gc_range(file, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) struct compat_f2fs_move_range {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) 	u32 dst_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) 	compat_u64 pos_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 	compat_u64 pos_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) 	compat_u64 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) #define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) 					struct compat_f2fs_move_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 	struct compat_f2fs_move_range __user *urange;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) 	struct f2fs_move_range range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) 	urange = compat_ptr(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 	err = get_user(range.dst_fd, &urange->dst_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 	err |= get_user(range.pos_in, &urange->pos_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 	err |= get_user(range.pos_out, &urange->pos_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 	err |= get_user(range.len, &urange->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 	return __f2fs_ioc_move_range(file, &range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 		return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 	case FS_IOC32_GETFLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) 		cmd = FS_IOC_GETFLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 	case FS_IOC32_SETFLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 		cmd = FS_IOC_SETFLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 	case FS_IOC32_GETVERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) 		cmd = FS_IOC_GETVERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) 	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) 		return f2fs_compat_ioc_gc_range(file, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 	case F2FS_IOC32_MOVE_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) 		return f2fs_compat_ioc_move_range(file, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) 	case F2FS_IOC_START_ATOMIC_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 	case F2FS_IOC_START_VOLATILE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 	case F2FS_IOC_SHUTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) 	case FITRIM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) 	case FS_IOC_SET_ENCRYPTION_POLICY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) 	case FS_IOC_GET_ENCRYPTION_PWSALT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) 	case FS_IOC_GET_ENCRYPTION_POLICY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) 	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 	case FS_IOC_ADD_ENCRYPTION_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 	case FS_IOC_REMOVE_ENCRYPTION_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) 	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) 	case FS_IOC_GET_ENCRYPTION_NONCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 	case F2FS_IOC_GARBAGE_COLLECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 	case F2FS_IOC_WRITE_CHECKPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 	case F2FS_IOC_DEFRAGMENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 	case F2FS_IOC_FLUSH_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 	case F2FS_IOC_GET_FEATURES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 	case FS_IOC_FSGETXATTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 	case FS_IOC_FSSETXATTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 	case F2FS_IOC_GET_PIN_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 	case F2FS_IOC_SET_PIN_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 	case F2FS_IOC_PRECACHE_EXTENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 	case F2FS_IOC_RESIZE_FS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 	case FS_IOC_ENABLE_VERITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 	case FS_IOC_MEASURE_VERITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 	case FS_IOC_READ_VERITY_METADATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 	case FS_IOC_GETFSLABEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 	case FS_IOC_SETFSLABEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) 	case F2FS_IOC_GET_COMPRESS_BLOCKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) 	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 	case F2FS_IOC_SEC_TRIM_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) 	case F2FS_IOC_GET_COMPRESS_OPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 	case F2FS_IOC_SET_COMPRESS_OPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) 	case F2FS_IOC_DECOMPRESS_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 	case F2FS_IOC_COMPRESS_FILE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) 		return -ENOIOCTLCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) 	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) const struct file_operations f2fs_file_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) 	.llseek		= f2fs_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) 	.read_iter	= f2fs_file_read_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) 	.write_iter	= f2fs_file_write_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) 	.open		= f2fs_file_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 	.release	= f2fs_release_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 	.mmap		= f2fs_file_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 	.flush		= f2fs_file_flush,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 	.fsync		= f2fs_sync_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) 	.fallocate	= f2fs_fallocate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 	.unlocked_ioctl	= f2fs_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 	.compat_ioctl	= f2fs_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 	.splice_read	= generic_file_splice_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 	.splice_write	= iter_file_splice_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) };
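
For reference, a minimal userspace sketch of driving the per-file compression ioctls handled above. The header path, the test file path, and the algorithm value are assumptions for illustration: the F2FS_IOC_* constants and struct f2fs_comp_option are expected to come from the kernel's UAPI header (or a local copy), and the file must live on an f2fs filesystem mounted with compression enabled and compress_mode=user.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>		/* F2FS_IOC_*, struct f2fs_comp_option (assumed available) */

int main(void)
{
	struct f2fs_comp_option opt = {
		.algorithm = 2,		/* e.g. zstd; numbering follows the kernel's COMPRESS_* enum */
		.log_cluster_size = 2,	/* 4-block clusters, within MIN/MAX_COMPRESS_LOG_SIZE */
	};
	int fd = open("/mnt/f2fs/data.bin", O_RDWR);	/* example path on an f2fs mount */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The file must already carry the compression flag and still be empty. */
	if (ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt))
		perror("F2FS_IOC_SET_COMPRESS_OPTION");

	/* ... write data, then ask writeback to store it compressed ... */
	if (ioctl(fd, F2FS_IOC_COMPRESS_FILE))
		perror("F2FS_IOC_COMPRESS_FILE");

	return 0;
}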