Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) #include <linux/iversion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include "compression.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include "ctree.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include "delalloc-space.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include "reflink.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include "transaction.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #define BTRFS_MAX_DEDUPE_LEN	SZ_16M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 				     struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 				     u64 endoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 				     const u64 destoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 				     const u64 olen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 				     int no_time_update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 	struct btrfs_root *root = BTRFS_I(inode)->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 	inode_inc_iversion(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 	if (!no_time_update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 		inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	 * We round up to the block size at eof when determining which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	 * extents to clone above, but shouldn't round up the file size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	if (endoff > destoff + olen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 		endoff = destoff + olen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	if (endoff > inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 		i_size_write(inode, endoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 		btrfs_inode_safe_disk_i_size_write(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	ret = btrfs_update_inode(trans, root, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 		btrfs_abort_transaction(trans, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 		btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	ret = btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
/*
 * Copy the payload of an inline extent into the destination inode's page
 * cache at @file_offset, marking the block as delalloc so the data is
 * written out as a regular (non-inline) extent later.
 *
 * @inode:       destination inode
 * @file_offset: destination file offset, must be block-size aligned
 * @inline_data: the raw file extent item (header followed by inline payload)
 * @size:        size of the file extent item, header included
 * @datal:       uncompressed length of the inline data
 * @comp_type:   compression type of the inline payload
 *
 * Returns 0 on success or a negative errno.
 */
static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	const u64 block_size = btrfs_inode_sectorsize(inode);
	const u64 range_end = file_offset + block_size - 1;
	/* Payload length/start: skip the file extent item header. */
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	/* NOTE(review): data is copied at page offset 0, so this path
	 * assumes sectorsize == PAGE_SIZE (no subpage support in 5.10). */
	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	set_page_extent_mapped(page);
	/* Clear any stale delalloc/accounting state before re-marking. */
	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not being
	 * possible to flush to avoid such deadlock. We will clear that flag
	 * when we finish cloning all extents, since a transaction is started
	 * after finding each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

	if (comp_type == BTRFS_COMPRESS_NONE) {
		char *map;

		/* Uncompressed inline data: plain copy into the page. */
		map = kmap(page);
		memcpy(map, data_start, datal);
		flush_dcache_page(page);
		kunmap(page);
	} else {
		/* Compressed inline data: decompress straight into the page. */
		ret = btrfs_decompress(comp_type, data_start, page, 0,
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller then the block/page size, then the
	 * remaining of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K"
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size) {
		char *map;

		map = kmap(page);
		memset(map + datal, 0, block_size - datal);
		flush_dcache_page(page);
		kunmap(page);
	}

	SetPageUptodate(page);
	ClearPageChecked(page);
	set_page_dirty(page);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	/*
	 * On error, return the reserved data space; on success it is
	 * presumably consumed when the dirtied delalloc range is written
	 * back. The metadata extent reservation is released either way.
	 */
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 
/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to destination inode when possible. When not possible we
 * copy the inline extent's data into the respective page of the inode.
 *
 * @dst:        destination inode
 * @path:       btrfs path, released before any transaction is started
 * @new_key:    key of the extent item to insert at the destination
 * @drop_start: file offset from which to drop existing destination extents
 * @datal:      uncompressed length of the inline data
 * @size:       size of the source file extent item, header included
 * @comp_type:  compression type of the inline payload
 * @inline_data: the raw source file extent item
 * @trans_out:  on success, the transaction handle the caller must finish
 *              (committed later via clone_finish_inode_update)
 *
 * Returns 0 on success or a negative errno. On error any transaction
 * started here is aborted and ended before returning.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	int ret;
	struct btrfs_key key;

	/*
	 * Inline extents can only exist at file offset 0; a non-zero
	 * destination offset therefore always goes through the page copy.
	 */
	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	/* Look up the destination's extent item at offset 0, if any. */
	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			goto copy_to_page;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		goto copy_to_page;
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt the same way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger then the one we want to
		 * clone. Deal with all these cases by copying the inline extent
		 * data into the respective page at the destination inode.
		 */
		goto copy_to_page;
	}

	/*
	 * Release path before starting a new transaction so we don't hold locks
	 * that would confuse lockdep.
	 */
	btrfs_release_path(path);
	/*
	 * If we end up here it means were copy the inline extent into a leaf
	 * of the destination inode. We know we will drop or adjust at most one
	 * extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	/* Remove whatever currently covers the target range ... */
	ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
	if (ret)
		goto out;
	/* ... and insert a copy of the source inline extent item. */
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	inode_add_bytes(dst, datal);
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	/* On success, hand the open transaction back to the caller. */
	if (!ret)
		*trans_out = trans;

	return ret;

copy_to_page:
	/*
	 * Release our path because we don't need it anymore and also because
	 * copy_inline_to_page() needs to reserve data and metadata, which may
	 * need to flush delalloc when we are low on available space and
	 * therefore cause a deadlock if writeback of an inline extent needs to
	 * write to the same leaf or an ordered extent completion needs to write
	 * to the same leaf.
	 */
	btrfs_release_path(path);

	ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
				  inline_data, size, datal, comp_type);
	goto out;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)  * btrfs_clone() - clone a range from inode file to another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)  * @src: Inode to clone from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)  * @inode: Inode to clone to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)  * @off: Offset within source to start clone from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)  * @olen: Original length, passed by user, of range to clone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)  * @olen_aligned: Block-aligned value of olen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)  * @destoff: Offset within @inode to start clone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)  * @no_time_update: Whether to update mtime/ctime on the target inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) static int btrfs_clone(struct inode *src, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 		       const u64 off, const u64 olen, const u64 olen_aligned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 		       const u64 destoff, int no_time_update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	struct btrfs_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 	struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 	char *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	u32 nritems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	const u64 len = olen_aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	u64 last_dest_end = destoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 		kvfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	path->reada = READA_FORWARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 	/* Clone data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	key.objectid = btrfs_ino(BTRFS_I(src));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	key.type = BTRFS_EXTENT_DATA_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	key.offset = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 		u64 next_key_min_offset = key.offset + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 		struct btrfs_file_extent_item *extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 		u64 extent_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 		int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 		u32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 		struct btrfs_key new_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 		u64 disko = 0, diskl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 		u64 datao = 0, datal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 		u8 comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 		u64 drop_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 		/* Note the key will change type as we walk through the tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 		path->leave_spinning = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 				0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 		 * First search, if no extent item that starts at offset off was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 		 * found but the previous item is an extent item, it's possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 		 * it might overlap our target range, therefore process it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 			btrfs_item_key_to_cpu(path->nodes[0], &key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 					      path->slots[0] - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 			if (key.type == BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 				path->slots[0]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 		nritems = btrfs_header_nritems(path->nodes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) process_slot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 		if (path->slots[0] >= nritems) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 			if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 			nritems = btrfs_header_nritems(path->nodes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 		leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 		slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 		btrfs_item_key_to_cpu(leaf, &key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 		if (key.type > BTRFS_EXTENT_DATA_KEY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 		    key.objectid != btrfs_ino(BTRFS_I(src)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 		extent = btrfs_item_ptr(leaf, slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 					struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 		extent_gen = btrfs_file_extent_generation(leaf, extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 		comp = btrfs_file_extent_compression(leaf, extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 		type = btrfs_file_extent_type(leaf, extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 		if (type == BTRFS_FILE_EXTENT_REG ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 		    type == BTRFS_FILE_EXTENT_PREALLOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 			datao = btrfs_file_extent_offset(leaf, extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 			datal = btrfs_file_extent_num_bytes(leaf, extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 			/* Take upper bound, may be compressed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 			datal = btrfs_file_extent_ram_bytes(leaf, extent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 		 * The first search might have left us at an extent item that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 		 * ends before our target range's start, can happen if we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 		 * holes and NO_HOLES feature enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 		if (key.offset + datal <= off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 			path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 			goto process_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 		} else if (key.offset >= off + len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 		next_key_min_offset = key.offset + datal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 		size = btrfs_item_size_nr(leaf, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 				   size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 		btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 		path->leave_spinning = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 		memcpy(&new_key, &key, sizeof(new_key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 		new_key.objectid = btrfs_ino(BTRFS_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 		if (off <= key.offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 			new_key.offset = key.offset + destoff - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 			new_key.offset = destoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 		 * Deal with a hole that doesn't have an extent item that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		 * represents it (NO_HOLES feature enabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 		 * This hole is either in the middle of the cloning range or at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 		 * the beginning (fully overlaps it or partially overlaps it).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 		if (new_key.offset != last_dest_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 			drop_start = last_dest_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 			drop_start = new_key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 		if (type == BTRFS_FILE_EXTENT_REG ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 		    type == BTRFS_FILE_EXTENT_PREALLOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 			struct btrfs_replace_extent_info clone_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 			 *    a  | --- range to clone ---|  b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 			 * | ------------- extent ------------- |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 			/* Subtract range b */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 			if (key.offset + datal > off + len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 				datal = off + len - key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 			/* Subtract range a */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 			if (off > key.offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 				datao += off - key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 				datal -= off - key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 			clone_info.disk_offset = disko;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 			clone_info.disk_len = diskl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 			clone_info.data_offset = datao;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 			clone_info.data_len = datal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 			clone_info.file_offset = new_key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 			clone_info.extent_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 			clone_info.is_new_extent = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 			ret = btrfs_replace_file_extents(inode, path, drop_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 					new_key.offset + datal - 1, &clone_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 					&trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 			 * Inline extents always have to start at file offset 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 			 * and can never be bigger then the sector size. We can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 			 * never clone only parts of an inline extent, since all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 			 * reflink operations must start at a sector size aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 			 * offset, and the length must be aligned too or end at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 			 * the i_size (which implies the whole inlined data).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 			ASSERT(key.offset == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 			ASSERT(datal <= fs_info->sectorsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 			if (WARN_ON(key.offset != 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 			    WARN_ON(datal > fs_info->sectorsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 				ret = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 			ret = clone_copy_inline_extent(inode, path, &new_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 						       drop_start, datal, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 						       comp, buf, &trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 		btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 		 * If this is a new extent update the last_reflink_trans of both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 		 * inodes. This is used by fsync to make sure it does not log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 		 * multiple checksum items with overlapping ranges. For older
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 		 * extents we don't need to do it since inode logging skips the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 		 * checksums for older extents. Also ignore holes and inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 		 * extents because they don't have checksums in the csum tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		if (extent_gen == trans->transid && disko > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 			BTRFS_I(src)->last_reflink_trans = trans->transid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 			BTRFS_I(inode)->last_reflink_trans = trans->transid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 		last_dest_end = ALIGN(new_key.offset + datal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 				      fs_info->sectorsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 		ret = clone_finish_inode_update(trans, inode, last_dest_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 						destoff, olen, no_time_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		if (new_key.offset + datal >= destoff + len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 		btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 		key.offset = next_key_min_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 		if (fatal_signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 			ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	if (last_dest_end < destoff + len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 		 * We have an implicit hole that fully or partially overlaps our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 		 * cloning range at its end. This means that we either have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 		 * NO_HOLES feature enabled or the implicit hole happened due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 		 * mixing buffered and direct IO writes against this file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 		btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 		path->leave_spinning = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		 * When using NO_HOLES and we are cloning a range that covers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 		 * only a hole (no extents) into a range beyond the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		 * i_size, punching a hole in the target range will not create
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 		 * an extent map defining a hole, because the range starts at or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		 * beyond current i_size. If the file previously had an i_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		 * greater than the new i_size set by this clone operation, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 		 * need to make sure the next fsync is a full fsync, so that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 		 * detects and logs a hole covering a range from the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		 * i_size to the new i_size. If the clone range covers extents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		 * besides a hole, then we know the full sync flag was already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 		 * set by previous calls to btrfs_replace_file_extents() that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 		 * replaced file extent items.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 		if (last_dest_end >= i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 				&BTRFS_I(inode)->runtime_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 		ret = btrfs_replace_file_extents(inode, path, last_dest_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 				destoff + len - 1, NULL, &trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 		ret = clone_finish_inode_update(trans, inode, destoff + len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 						destoff, olen, no_time_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	kvfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 				       struct inode *inode2, u64 loff2, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 				     struct inode *inode2, u64 loff2, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	if (inode1 < inode2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 		swap(inode1, inode2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 		swap(loff1, loff2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	} else if (inode1 == inode2 && loff2 < loff1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 		swap(loff1, loff2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 				   struct inode *dst, u64 dst_loff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	 * Lock destination range to serialize with concurrent readpages() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	 * source range to serialize with relocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 			     struct inode *dst, u64 dst_loff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	u64 i, tail_len, chunk_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	struct btrfs_root *root_dst = BTRFS_I(dst)->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	spin_lock(&root_dst->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	if (root_dst->send_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 		btrfs_warn_rl(root_dst->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) "cannot deduplicate to root %llu while send operations are using it (%d in progress)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 			      root_dst->root_key.objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 			      root_dst->send_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 		spin_unlock(&root_dst->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	root_dst->dedupe_in_progress++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	spin_unlock(&root_dst->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	for (i = 0; i < chunk_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 					      dst, dst_loff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 		loff += BTRFS_MAX_DEDUPE_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 		dst_loff += BTRFS_MAX_DEDUPE_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	if (tail_len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	spin_lock(&root_dst->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	root_dst->dedupe_in_progress--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	spin_unlock(&root_dst->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 					u64 off, u64 olen, u64 destoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	struct inode *src = file_inode(file_src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	int wb_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	u64 len = olen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	u64 bs = fs_info->sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	 * VFS's generic_remap_file_range_prep() protects us from cloning the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	 * eof block into the middle of a file, which would result in corruption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	 * if the file size is not blocksize aligned. So we don't need to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	 * for that case here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	if (off + len == src->i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 		len = ALIGN(src->i_size, bs) - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	if (destoff > inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 		ret = btrfs_cont_expand(inode, inode->i_size, destoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 		 * We may have truncated the last block if the inode's size is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 		 * not sector size aligned, so we need to wait for writeback to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 		 * complete before proceeding further, otherwise we can race
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 		 * with cloning and attempt to increment a reference to an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 		 * extent that no longer exists (writeback completed right after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 		 * we found the previous extent covering eof and before we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 		 * attempted to increment its reference count).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 		ret = btrfs_wait_ordered_range(inode, wb_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 					       destoff - wb_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	 * Lock destination range to serialize with concurrent readpages() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	 * source range to serialize with relocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	btrfs_double_extent_lock(src, off, inode, destoff, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	btrfs_double_extent_unlock(src, off, inode, destoff, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 	 * We may have copied an inline extent into a page of the destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	 * range, so wait for writeback to complete before truncating pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	 * from the page cache. This is a rare case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	ret = ret ? ret : wb_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	 * Truncate page cache pages so that future reads will see the cloned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 	 * data immediately and not the previous data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	truncate_inode_pages_range(&inode->i_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 				round_down(destoff, PAGE_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 				round_up(destoff + len, PAGE_SIZE) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 				       struct file *file_out, loff_t pos_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 				       loff_t *len, unsigned int remap_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	struct inode *inode_in = file_inode(file_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	struct inode *inode_out = file_inode(file_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	bool same_inode = inode_out == inode_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	u64 wb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	if (!(remap_flags & REMAP_FILE_DEDUP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 		if (btrfs_root_readonly(root_out))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 			return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 		if (file_in->f_path.mnt != file_out->f_path.mnt ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 		    inode_in->i_sb != inode_out->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 			return -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	/* Don't make the dst file partly checksummed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	 * Now that the inodes are locked, we need to start writeback ourselves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	 * and can not rely on the writeback from the VFS's generic helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	 * generic_remap_file_range_prep() because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	 * 1) For compression we must call filemap_fdatawrite_range() range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	 *    twice (btrfs_fdatawrite_range() does it for us), and the generic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	 *    helper only calls it once;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	 * 2) filemap_fdatawrite_range(), called by the generic helper only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	 *    waits for the writeback to complete, i.e. for IO to be done, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 	 *    not for the ordered extents to complete. We need to wait for them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 	 *    to complete so that new file extent items are in the fs tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 		wb_len = ALIGN(*len, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	 * Since we don't lock ranges, wait for ongoing lockless dio writes (as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	 * any in progress could create its ordered extents after we wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	 * existing ordered extents below).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	inode_dio_wait(inode_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	if (!same_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 		inode_dio_wait(inode_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	 * Workaround to make sure NOCOW buffered write reach disk as NOCOW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	 * Btrfs' back references do not have a block level granularity, they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	 * work at the whole extent level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	 * NOCOW buffered write without data space reserved may not be able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 	 * to fall back to CoW due to lack of data space, thus could cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	 * data loss.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	 * Here we take a shortcut by flushing the whole inode, so that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 	 * nocow write should reach disk as nocow before we increase the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	 * reference of the extent. We could do better by only flushing NOCOW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	 * data, but that needs extra accounting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	 * Also we don't need to check ASYNC_EXTENT, as async extent will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	 * CoWed anyway, not affecting nocow part.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	ret = filemap_flush(inode_in->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 				       wb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 				       wb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 					    len, remap_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 		struct file *dst_file, loff_t destoff, loff_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 		unsigned int remap_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	struct inode *src_inode = file_inode(src_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 	struct inode *dst_inode = file_inode(dst_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	bool same_inode = dst_inode == src_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 	if (same_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 		inode_lock(src_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 		lock_two_nondirectories(src_inode, dst_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 					  &len, remap_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	if (ret < 0 || len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	if (remap_flags & REMAP_FILE_DEDUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	if (same_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 		inode_unlock(src_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 		unlock_two_nondirectories(src_inode, dst_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 	return ret < 0 ? ret : len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }