Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (C) 2012 Alexander Block.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) #include <linux/bsearch.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/mount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/xattr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/posix_acl_xattr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/radix-tree.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/compat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/crc32c.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include "send.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include "backref.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include "locking.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include "disk-io.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include "btrfs_inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "transaction.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include "compression.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include "xattr.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  * Maximum number of references an extent can have in order for us to attempt to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * issue clone operations instead of write operations. This currently exists to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  * avoid hitting limitations of the backreference walking code (taking a lot of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  * time and using too much memory for extents with large number of references).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #define SEND_MAX_EXTENT_REFS	64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  * A fs_path is a helper to dynamically build path names with unknown size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  * It reallocates the internal buffer on demand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  * It allows fast adding of path elements on the right side (normal path) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  * fast adding to the left side (reversed path). A reversed path can also be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  * unreversed if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) struct fs_path {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 		struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 			char *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 			char *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 			char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 			unsigned short buf_len:15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 			unsigned short reversed:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 			char inline_buf[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 		 * Average path length does not exceed 200 bytes, we'll have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 		 * better packing in the slab and higher chance to satisfy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 		 * a allocation later during send.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 		char pad[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #define FS_PATH_INLINE_SIZE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) /* reused for each extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) struct clone_root {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	u64 ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	u64 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	u64 found_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) struct send_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 	struct file *send_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 	loff_t send_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	char *send_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 	u32 send_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	u32 send_max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	u64 total_send_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	struct btrfs_root *send_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	struct btrfs_root *parent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	struct clone_root *clone_roots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	int clone_roots_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	/* current state of the compare_tree call */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	struct btrfs_path *left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	struct btrfs_path *right_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	struct btrfs_key *cmp_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	 * infos of the currently processed inode. In case of deleted inodes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	 * these are the values from the deleted inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	u64 cur_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	u64 cur_inode_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	int cur_inode_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	int cur_inode_new_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	int cur_inode_deleted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	u64 cur_inode_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	u64 cur_inode_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	u64 cur_inode_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	u64 cur_inode_last_extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	u64 cur_inode_next_write_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	bool ignore_cur_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	u64 send_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	struct list_head new_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	struct list_head deleted_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 	struct radix_tree_root name_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	struct list_head name_cache_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	int name_cache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	struct file_ra_state ra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 	 * We process inodes by their increasing order, so if before an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 	 * incremental send we reverse the parent/child relationship of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	 * directories such that a directory with a lower inode number was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	 * the parent of a directory with a higher inode number, and the one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	 * becoming the new parent got renamed too, we can't rename/move the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	 * directory with lower inode number when we finish processing it - we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	 * must process the directory with higher inode number first, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 	 * rename/move it and then rename/move the directory with lower inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	 * number. Example follows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	 * Tree state when the first send was performed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	 * .
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	 * |-- a                   (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	 *     |-- b               (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	 *         |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	 *         |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	 *         |-- c           (ino 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	 *         |   |-- d       (ino 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	 *         |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	 *         |-- c2          (ino 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	 * Tree state when the second (incremental) send is performed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	 * .
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	 * |-- a                   (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	 *     |-- b               (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	 *         |-- c2          (ino 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	 *             |-- d2      (ino 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	 *                 |-- cc  (ino 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	 * The sequence of steps that lead to the second state was:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	 * mv /a/b/c/d /a/b/c2/d2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	 * mv /a/b/c /a/b/c2/d2/cc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	 * "c" has lower inode number, but we can't move it (2nd mv operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	 * before we move "d", which has higher inode number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	 * So we just memorize which move/rename operations must be performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	 * later when their respective parent is processed and moved/renamed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	/* Indexed by parent directory inode number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	struct rb_root pending_dir_moves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	 * Reverse index, indexed by the inode number of a directory that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	 * is waiting for the move/rename of its immediate parent before its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	 * own move/rename can be performed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	struct rb_root waiting_dir_moves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	 * A directory that is going to be rm'ed might have a child directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	 * which is in the pending directory moves index above. In this case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 	 * the directory can only be removed after the move/rename of its child
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 	 * is performed. Example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	 * Parent snapshot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	 * .                        (ino 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	 * |-- a/                   (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	 *     |-- b/               (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	 *         |-- c/           (ino 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	 *         |   |-- x/       (ino 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	 *         |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	 *         |-- y/           (ino 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	 * Send snapshot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	 * .                        (ino 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	 * |-- a/                   (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	 *     |-- b/               (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	 *         |-- YY/          (ino 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	 *              |-- x/      (ino 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	 * Sequence of steps that lead to the send snapshot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	 * rm -f /a/b/c/foo.txt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	 * mv /a/b/y /a/b/YY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	 * mv /a/b/c/x /a/b/YY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	 * rmdir /a/b/c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	 * When the child is processed, its move/rename is delayed until its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	 * parent is processed (as explained above), but all other operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	 * like update utimes, chown, chgrp, etc, are performed and the paths
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	 * that it uses for those operations must use the orphanized name of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	 * its parent (the directory we're going to rm later), so we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	 * memorize that name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	 * Indexed by the inode number of the directory to be deleted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	struct rb_root orphan_dirs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) struct pending_dir_move {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	struct rb_node node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	u64 parent_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	u64 ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	u64 gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	struct list_head update_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) struct waiting_dir_move {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	struct rb_node node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	u64 ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	 * There might be some directory that could not be removed because it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	 * was waiting for this directory inode to be moved first. Therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	u64 rmdir_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	u64 rmdir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	bool orphanized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) struct orphan_dir_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	struct rb_node node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	u64 ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	u64 gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	u64 last_dir_index_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) struct name_cache_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	 * more then one inum would fall into the same entry, we use radix_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	 * to store the additional entries. radix_list is also used to store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	 * entries where two entries have the same inum but different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	 * generations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	struct list_head radix_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	u64 ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	u64 gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	u64 parent_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	u64 parent_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	int need_later_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	int name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	char name[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) #define ADVANCE							1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) #define ADVANCE_ONLY_NEXT					-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) enum btrfs_compare_tree_result {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	BTRFS_COMPARE_TREE_NEW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	BTRFS_COMPARE_TREE_DELETED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	BTRFS_COMPARE_TREE_CHANGED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	BTRFS_COMPARE_TREE_SAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) __cold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) static void inconsistent_snapshot_error(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 					enum btrfs_compare_tree_result result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 					const char *what)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	const char *result_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	switch (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	case BTRFS_COMPARE_TREE_NEW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 		result_string = "new";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	case BTRFS_COMPARE_TREE_DELETED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 		result_string = "deleted";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	case BTRFS_COMPARE_TREE_CHANGED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 		result_string = "updated";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	case BTRFS_COMPARE_TREE_SAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 		ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 		result_string = "unchanged";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 		ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 		result_string = "unexpected";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	btrfs_err(sctx->send_root->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 		  result_string, what, sctx->cmp_key->objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 		  sctx->send_root->root_key.objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 		  (sctx->parent_root ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 		   sctx->parent_root->root_key.objectid : 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) static struct waiting_dir_move *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) static int need_send_hole(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	return (sctx->parent_root && !sctx->cur_inode_new &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 		S_ISREG(sctx->cur_inode_mode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) static void fs_path_reset(struct fs_path *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	if (p->reversed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 		p->start = p->buf + p->buf_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		p->end = p->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 		*p->start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 		p->start = p->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		p->end = p->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 		*p->start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) static struct fs_path *fs_path_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	p = kmalloc(sizeof(*p), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	p->reversed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	p->buf = p->inline_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	p->buf_len = FS_PATH_INLINE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	fs_path_reset(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) static struct fs_path *fs_path_alloc_reversed(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	p->reversed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	fs_path_reset(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) static void fs_path_free(struct fs_path *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	if (p->buf != p->inline_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 		kfree(p->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) static int fs_path_len(struct fs_path *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	return p->end - p->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) static int fs_path_ensure_buf(struct fs_path *p, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	char *tmp_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	int path_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	int old_buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	if (p->buf_len >= len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	if (len > PATH_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	path_len = p->end - p->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	old_buf_len = p->buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	 * First time the inline_buf does not suffice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	if (p->buf == p->inline_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 		tmp_buf = kmalloc(len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 		if (tmp_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 			memcpy(tmp_buf, p->buf, old_buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	if (!tmp_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	p->buf = tmp_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	 * The real size of the buffer is bigger, this will let the fast path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	 * happen most of the time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	p->buf_len = ksize(p->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	if (p->reversed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 		tmp_buf = p->buf + old_buf_len - path_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 		p->end = p->buf + p->buf_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 		p->start = p->end - path_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 		memmove(p->start, tmp_buf, path_len + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 		p->start = p->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 		p->end = p->start + path_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 				   char **prepared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	int new_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	new_len = p->end - p->start + name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	if (p->start != p->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		new_len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	ret = fs_path_ensure_buf(p, new_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	if (p->reversed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		if (p->start != p->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 			*--p->start = '/';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		p->start -= name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		*prepared = p->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		if (p->start != p->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 			*p->end++ = '/';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		*prepared = p->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		p->end += name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		*p->end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
/*
 * Append @name_len bytes of @name to @p, inserting a '/' separator when
 * @p is non-empty.
 */
static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	char *dest;
	int ret;

	ret = fs_path_prepare_for_add(p, name_len, &dest);
	if (ret < 0)
		return ret;
	memcpy(dest, name, name_len);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	char *prepared;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	memcpy(prepared, p2->start, p2->end - p2->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
/*
 * Append @len bytes located at offset @off inside extent buffer @eb to @p.
 */
static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	char *dest;
	int ret;

	ret = fs_path_prepare_for_add(p, len, &dest);
	if (ret < 0)
		return ret;
	/* Copy the name straight out of the metadata extent buffer. */
	read_extent_buffer(eb, dest, off, len);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) static int fs_path_copy(struct fs_path *p, struct fs_path *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	p->reversed = from->reversed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	fs_path_reset(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	ret = fs_path_add_path(p, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) static void fs_path_unreverse(struct fs_path *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	char *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	if (!p->reversed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	tmp = p->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	len = p->end - p->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	p->start = p->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	p->end = p->start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	memmove(p->start, tmp, len + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	p->reversed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) static struct btrfs_path *alloc_path_for_send(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	path->search_commit_root = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	path->skip_locking = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	path->need_commit_sem = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	return path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	u32 pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	while (pos < len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		ret = kernel_write(filp, buf + pos, len - pos, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 		/* TODO handle that correctly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		/*if (ret == -ERESTARTSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		}*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		pos += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	struct btrfs_tlv_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	int total_len = sizeof(*hdr) + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	int left = sctx->send_max_size - sctx->send_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	if (unlikely(left < total_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	put_unaligned_le16(attr, &hdr->tlv_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	put_unaligned_le16(len, &hdr->tlv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	memcpy(hdr + 1, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	sctx->send_size += total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
/*
 * Generate tlv_put_u<bits>(): convert an integer value to little endian and
 * append it as a TLV attribute via tlv_put().
 */
#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,	 	\
			u##bits attr, u##bits value)			\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

/* Only the 64-bit variant is instantiated here. */
TLV_PUT_DEFINE_INT(64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) static int tlv_put_string(struct send_ctx *sctx, u16 attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 			  const char *str, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	if (len == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		len = strlen(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	return tlv_put(sctx, attr, str, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 
/*
 * Append a raw UUID attribute (always BTRFS_UUID_SIZE bytes) to the
 * current command.
 */
static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 				  struct extent_buffer *eb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 				  struct btrfs_timespec *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	struct btrfs_timespec bts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	return tlv_put(sctx, attr, &bts, sizeof(bts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
/*
 * Convenience wrappers used while building a command: each appends one
 * attribute and, on error, stores the code in the local 'ret' and jumps to
 * the caller's "tlv_put_failure" label, which every user must define.
 */
#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
/* Emit the current contents of an fs_path as a string attribute. */
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
			p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while(0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) static int send_header(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	struct btrfs_stream_header hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 					&sctx->send_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679)  * For each command/item we want to send to userspace, we call this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) static int begin_cmd(struct send_ctx *sctx, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	struct btrfs_cmd_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	if (WARN_ON(!sctx->send_buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	BUG_ON(sctx->send_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	sctx->send_size += sizeof(*hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	put_unaligned_le16(cmd, &hdr->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) static int send_cmd(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	struct btrfs_cmd_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	u32 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	put_unaligned_le32(0, &hdr->crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	put_unaligned_le32(crc, &hdr->crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 					&sctx->send_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	sctx->total_send_size += sctx->send_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	sctx->cmd_send_size[get_unaligned_le16(&hdr->cmd)] += sctx->send_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	sctx->send_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721)  * Sends a move instruction to user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) static int send_rename(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		     struct fs_path *from, struct fs_path *to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  * Sends a link instruction to user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static int send_link(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		     struct fs_path *path, struct fs_path *lnk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771)  * Sends an unlink instruction to user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	btrfs_debug(fs_info, "send_unlink %s", path->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794)  * Sends a rmdir instruction to user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	btrfs_debug(fs_info, "send_rmdir %s", path->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  * Helper function to retrieve some fields from an inode item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			  u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			  u64 *gid, u64 *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	struct btrfs_inode_item *ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	key.objectid = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	key.type = BTRFS_INODE_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			struct btrfs_inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	if (size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		*size = btrfs_inode_size(path->nodes[0], ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	if (gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		*gen = btrfs_inode_generation(path->nodes[0], ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	if (mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		*mode = btrfs_inode_mode(path->nodes[0], ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (uid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		*uid = btrfs_inode_uid(path->nodes[0], ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	if (gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		*gid = btrfs_inode_gid(path->nodes[0], ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	if (rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		*rdev = btrfs_inode_rdev(path->nodes[0], ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) static int get_inode_info(struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			  u64 ino, u64 *size, u64 *gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 			  u64 *mode, u64 *uid, u64 *gid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			  u64 *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			       rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
/*
 * Callback invoked once per name while iterating an inode's INODE_REF or
 * INODE_EXTREF items; returning non-zero stops the iteration (negative for
 * an error, 1 to simply stop).
 */
typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877)  * Helper function to iterate the entries in ONE btrfs_inode_ref or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878)  * btrfs_inode_extref.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879)  * The iterate callback may return a non zero value to stop iteration. This can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880)  * be a negative value for error codes or 1 to simply stop it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  * path must point to the INODE_REF or INODE_EXTREF when called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			     struct btrfs_key *found_key, int resolve,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			     iterate_inode_ref_t iterate, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	struct extent_buffer *eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct btrfs_item *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	struct btrfs_inode_ref *iref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	struct btrfs_inode_extref *extref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	struct btrfs_path *tmp_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	u32 cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	u32 total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	int slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	u32 name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	char *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	int num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	u64 dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	unsigned long name_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	unsigned long elem_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	unsigned long ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	p = fs_path_alloc_reversed();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	tmp_path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	if (!tmp_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	if (found_key->type == BTRFS_INODE_REF_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 						    struct btrfs_inode_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		item = btrfs_item_nr(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		total = btrfs_item_size(eb, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		elem_size = sizeof(*iref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		ptr = btrfs_item_ptr_offset(eb, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		total = btrfs_item_size_nr(eb, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		elem_size = sizeof(*extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	while (cur < total) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		fs_path_reset(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		if (found_key->type == BTRFS_INODE_REF_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			iref = (struct btrfs_inode_ref *)(ptr + cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			name_len = btrfs_inode_ref_name_len(eb, iref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			name_off = (unsigned long)(iref + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			index = btrfs_inode_ref_index(eb, iref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			dir = found_key->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			extref = (struct btrfs_inode_extref *)(ptr + cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			name_len = btrfs_inode_extref_name_len(eb, extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			name_off = (unsigned long)&extref->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			index = btrfs_inode_extref_index(eb, extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			dir = btrfs_inode_extref_parent(eb, extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		if (resolve) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			start = btrfs_ref_to_path(root, tmp_path, name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 						  name_off, eb, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 						  p->buf, p->buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			if (IS_ERR(start)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 				ret = PTR_ERR(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			if (start < p->buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 				/* overflow , try again with larger buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 				ret = fs_path_ensure_buf(p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 						p->buf_len + p->buf - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				start = btrfs_ref_to_path(root, tmp_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 							  name_len, name_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 							  eb, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 							  p->buf, p->buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 				if (IS_ERR(start)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 					ret = PTR_ERR(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 				BUG_ON(start < p->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			p->start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 							     name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		cur += elem_size + name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		ret = iterate(num, dir, index, p, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	btrfs_free_path(tmp_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
/*
 * Callback invoked once per entry while iterating a dir item (see
 * iterate_dir_item()).  @name and @data point into a buffer owned by the
 * iterator that is reused on each entry, so they are only valid for the
 * duration of the call.  Return a negative value to abort iteration with
 * that error, or a positive value to stop it cleanly.
 */
typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * Helper function to iterate the entries in ONE btrfs_dir_item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  * The iterate callback may return a non zero value to stop iteration. This can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  * be a negative value for error codes or 1 to simply stop it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  * path must point to the dir item when called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)  */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;		/* shared name+data scratch buffer */
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;			/* bytes consumed within the item */
	u32 len;			/* size of the current entry */
	u32 total;			/* total item size in the leaf */
	int slot;
	int num;			/* entry counter passed to callback */
	u8 type;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		/*
		 * Validate the on-disk lengths before using them to size the
		 * buffer: xattrs are bounded by XATTR_NAME_MAX and the
		 * per-leaf xattr size limit, everything else by PATH_MAX.
		 */
		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len >
					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			/*
			 * Grow the buffer.  krealloc() cannot operate on a
			 * vmalloc'ed area, so such a buffer is freed and
			 * reallocated via kvmalloc() below.  Old contents
			 * need not be preserved: the buffer is refilled on
			 * every loop iteration.
			 */
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_KERNEL | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				/* kvmalloc may fall back to vmalloc. */
				buf = kvmalloc(buf_len, GFP_KERNEL);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		/* Copy name followed by data out of the leaf in one go. */
		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		/* Advance to the next dir item entry packed in this item. */
		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		/*
		 * Negative callback return aborts with an error; a positive
		 * one stops iteration cleanly (reported as success).
		 */
		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
				data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static int __copy_first_ref(int num, u64 dir, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			    struct fs_path *p, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	struct fs_path *pt = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	ret = fs_path_copy(pt, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	/* we want the first only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static int get_inode_path(struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			  u64 ino, struct fs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	struct btrfs_key key, found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	struct btrfs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	p = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	fs_path_reset(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	key.objectid = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	key.type = BTRFS_INODE_REF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	if (found_key.objectid != ino ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	    (found_key.type != BTRFS_INODE_REF_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	ret = iterate_inode_ref(root, p, &found_key, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				__copy_first_ref, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	btrfs_free_path(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
/*
 * State shared with __iterate_backrefs() while walking the backrefs of one
 * file extent in order to pick a clone source (see find_extent_clone()).
 */
struct backref_ctx {
	struct send_ctx *sctx;

	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* data offset in the file extent item */
	u64 data_offset;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static int __clone_root_cmp_bsearch(const void *key, const void *elt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	u64 root = (u64)(uintptr_t)key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	struct clone_root *cr = (struct clone_root *)elt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	if (root < cr->root->root_key.objectid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	if (root > cr->root->root_key.objectid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static int __clone_root_cmp_sort(const void *e1, const void *e2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	struct clone_root *cr1 = (struct clone_root *)e1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	struct clone_root *cr2 = (struct clone_root *)e2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)  * Called for every backref that is found for the current extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)  * Results are collected in sctx->clone_roots->ino/offset/found_refs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)  */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	/* Record that backref walking found the extent's own reference. */
	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * If the source inode was not yet processed we can't issue a
		 * clone operation, as the source extent does not exist yet at
		 * the destination of the stream.
		 */
		if (ino > bctx->cur_objectid)
			return 0;
		/*
		 * We clone from the inode currently being sent as long as the
		 * source extent is already processed, otherwise we could try
		 * to clone from an extent that does not exist yet at the
		 * destination of the stream.
		 */
		if (ino == bctx->cur_objectid &&
		    offset + bctx->extent_len >
		    bctx->sctx->cur_inode_next_write_offset)
			return 0;
	}

	/* Accept this backref as a clone candidate for its root. */
	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		/* Prefer the reference with the lowest inode number. */
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) static int find_extent_clone(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 			     struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 			     u64 ino, u64 data_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			     u64 ino_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 			     struct clone_root **found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	int extent_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	u64 logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	u64 disk_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	u64 num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	u64 extent_item_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	u64 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	struct btrfs_file_extent_item *fi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	struct extent_buffer *eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	struct backref_ctx *backref_ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	struct clone_root *cur_clone_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	struct btrfs_path *tmp_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	struct btrfs_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	int compressed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	tmp_path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	if (!tmp_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	/* We only use this path under the commit sem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	tmp_path->need_commit_sem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	if (!backref_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	if (data_offset >= ino_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		 * There may be extents that lie behind the file's size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		 * I at least had this in combination with snapshotting while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		 * writing large files.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	fi = btrfs_item_ptr(eb, path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	extent_type = btrfs_file_extent_type(eb, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	compressed = btrfs_file_extent_compression(eb, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	if (disk_byte == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	logical = disk_byte + btrfs_file_extent_offset(eb, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	down_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	ret = extent_from_logical(fs_info, disk_byte, tmp_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 				  &found_key, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	up_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			    struct btrfs_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	 * Backreference walking (iterate_extent_inodes() below) is currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	 * too expensive when an extent has a large number of references, both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	 * in time spent and used memory. So for now just fallback to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	 * operations instead of clone operations when an extent has more than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	 * a certain amount of references.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	btrfs_release_path(tmp_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	 * Setup the clone roots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	for (i = 0; i < sctx->clone_roots_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		cur_clone_root = sctx->clone_roots + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		cur_clone_root->ino = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		cur_clone_root->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		cur_clone_root->found_refs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	backref_ctx->sctx = sctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	backref_ctx->found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	backref_ctx->cur_objectid = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	backref_ctx->cur_offset = data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	backref_ctx->found_itself = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	backref_ctx->extent_len = num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	 * For non-compressed extents iterate_extent_inodes() gives us extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	 * offsets that already take into account the data offset, but not for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	 * compressed extents, since the offset is logical and not relative to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	 * the physical extent locations. We must take this into account to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	 * avoid sending clone offsets that go beyond the source file's size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	 * which would result in the clone ioctl failing with -EINVAL on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	 * receiving end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	if (compressed == BTRFS_COMPRESS_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		backref_ctx->data_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	 * The last extent of a file may be too large due to page alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	 * We need to adjust extent_len in this case so that the checks in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	 * __iterate_backrefs work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (data_offset + num_bytes >= ino_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		backref_ctx->extent_len = ino_size - data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	 * Now collect all backrefs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	if (compressed == BTRFS_COMPRESS_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		extent_item_pos = logical - found_key.objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		extent_item_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	ret = iterate_extent_inodes(fs_info, found_key.objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 				    extent_item_pos, 1, __iterate_backrefs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 				    backref_ctx, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (!backref_ctx->found_itself) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		/* found a bug in backref code? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 			  "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 			  ino, data_offset, disk_byte, found_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	btrfs_debug(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		    data_offset, ino, num_bytes, logical);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	if (!backref_ctx->found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		btrfs_debug(fs_info, "no clones found");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	cur_clone_root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	for (i = 0; i < sctx->clone_roots_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		if (sctx->clone_roots[i].found_refs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			if (!cur_clone_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 				cur_clone_root = sctx->clone_roots + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			else if (sctx->clone_roots[i].root == sctx->send_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 				/* prefer clones from send_root over others */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 				cur_clone_root = sctx->clone_roots + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	if (cur_clone_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		*found = cur_clone_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	btrfs_free_path(tmp_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	kfree(backref_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static int read_symlink(struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			u64 ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			struct fs_path *dest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	struct btrfs_file_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	u8 compression;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	unsigned long off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	key.objectid = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	key.type = BTRFS_EXTENT_DATA_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		 * An empty symlink inode. Can happen in rare error paths when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		 * creating a symlink (transaction committed before the inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		 * eviction handler removed the symlink inode items and a crash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		 * happened in between or the subvol was snapshoted in between).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		 * Print an informative message to dmesg/syslog so that the user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		 * can delete the symlink.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		btrfs_err(root->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			  "Found empty symlink inode %llu at root %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			  ino, root->root_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 			struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	type = btrfs_file_extent_type(path->nodes[0], ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	compression = btrfs_file_extent_compression(path->nodes[0], ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	BUG_ON(compression);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	off = btrfs_file_extent_inline_start(ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)  * Helper function to generate a file name that is unique in the root of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)  * send_root and parent_root. This is used to generate names for orphan inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static int gen_unique_name(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			   u64 ino, u64 gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			   struct fs_path *dest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	struct btrfs_dir_item *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	char tmp[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	u64 idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 				ino, gen, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		ASSERT(len < sizeof(tmp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 				path, BTRFS_FIRST_FREE_OBJECTID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 				tmp, strlen(tmp), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		if (IS_ERR(di)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			ret = PTR_ERR(di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		if (di) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 			/* not unique, try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 			idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		if (!sctx->parent_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 			/* unique */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 				path, BTRFS_FIRST_FREE_OBJECTID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 				tmp, strlen(tmp), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		if (IS_ERR(di)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 			ret = PTR_ERR(di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		if (di) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			/* not unique, try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 			idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		/* unique */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	ret = fs_path_add(dest, tmp, strlen(tmp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) enum inode_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	inode_state_no_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	inode_state_will_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	inode_state_did_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	inode_state_will_delete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	inode_state_did_delete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	int left_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	int right_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	u64 left_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	u64 right_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	if (ret < 0 && ret != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	left_ret = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	if (!sctx->parent_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		right_ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 				NULL, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		if (ret < 0 && ret != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		right_ret = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	if (!left_ret && !right_ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		if (left_gen == gen && right_gen == gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 			ret = inode_state_no_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		} else if (left_gen == gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			if (ino < sctx->send_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 				ret = inode_state_did_create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 				ret = inode_state_will_create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		} else if (right_gen == gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 			if (ino < sctx->send_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 				ret = inode_state_did_delete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 				ret = inode_state_will_delete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		} else  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 			ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	} else if (!left_ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		if (left_gen == gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 			if (ino < sctx->send_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 				ret = inode_state_did_create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 				ret = inode_state_will_create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 			ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	} else if (!right_ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		if (right_gen == gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 			if (ino < sctx->send_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 				ret = inode_state_did_delete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 				ret = inode_state_will_delete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 			ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	if (ino == BTRFS_FIRST_FREE_OBJECTID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	ret = get_cur_inode_state(sctx, ino, gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	if (ret == inode_state_no_change ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	    ret == inode_state_did_create ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	    ret == inode_state_will_delete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)  * Helper function to lookup a dir item in a dir.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static int lookup_dir_item_inode(struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 				 u64 dir, const char *name, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 				 u64 *found_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 				 u8 *found_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	struct btrfs_dir_item *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	di = btrfs_lookup_dir_item(NULL, root, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 			dir, name, name_len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	if (IS_ERR_OR_NULL(di)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		ret = di ? PTR_ERR(di) : -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	if (key.type == BTRFS_ROOT_ITEM_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	*found_inode = key.objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	*found_type = btrfs_dir_type(path->nodes[0], di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)  * generation of the parent dir and the name of the dir entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) static int get_first_ref(struct btrfs_root *root, u64 ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 			 u64 *dir, u64 *dir_gen, struct fs_path *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	u64 parent_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	key.objectid = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	key.type = BTRFS_INODE_REF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 				path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	if (ret || found_key.objectid != ino ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	    (found_key.type != BTRFS_INODE_REF_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	if (found_key.type == BTRFS_INODE_REF_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		struct btrfs_inode_ref *iref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 				      struct btrfs_inode_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 						     (unsigned long)(iref + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 						     len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		parent_dir = found_key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		struct btrfs_inode_extref *extref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 					struct btrfs_inode_extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 					(unsigned long)&extref->name, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	if (dir_gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 				     NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	*dir = parent_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) static int is_first_ref(struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 			u64 ino, u64 dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 			const char *name, int name_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	struct fs_path *tmp_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	u64 tmp_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	tmp_name = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	if (!tmp_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	ret = !memcmp(tmp_name->start, name, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	fs_path_free(tmp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)  * Used by process_recorded_refs to determine if a new ref would overwrite an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)  * already existing ref. In case it detects an overwrite, it returns the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)  * inode/gen in who_ino/who_gen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)  * When an overwrite is detected, process_recorded_refs does proper orphanizing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)  * to make sure later references to the overwritten inode are possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)  * Orphanizing is however only required for the first ref of an inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)  * process_recorded_refs does an additional is_first_ref check to see if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)  * orphanizing is really required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			      const char *name, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			      u64 *who_ino, u64 *who_gen, u64 *who_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	u64 gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	u64 other_inode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	u8 other_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	if (!sctx->parent_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	ret = is_inode_existent(sctx, dir, dir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	 * If we have a parent root we need to verify that the parent dir was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	 * not deleted and then re-created, if it was then we have no overwrite
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	 * and we can just unlink this entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 				     NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		if (ret < 0 && ret != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		if (gen != dir_gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 			&other_inode, &other_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	if (ret < 0 && ret != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	 * Check if the overwritten ref was already processed. If yes, the ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	 * was already unlinked/moved, so we can safely assume that we will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	 * overwrite anything at this point in time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	if (other_inode > sctx->send_progress ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	    is_waiting_for_move(sctx, other_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 				who_gen, who_mode, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		*who_ino = other_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)  * Checks if the ref was overwritten by an already processed inode. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)  * used by __get_cur_name_and_parent to find out if the ref was orphanized and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)  * thus the orphan name needs be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)  * process_recorded_refs also uses it to avoid unlinking of refs that were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)  * overwritten.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) static int did_overwrite_ref(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			    u64 dir, u64 dir_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			    u64 ino, u64 ino_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			    const char *name, int name_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	u64 gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	u64 ow_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	u8 other_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	if (!sctx->parent_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	ret = is_inode_existent(sctx, dir, dir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	if (dir != BTRFS_FIRST_FREE_OBJECTID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 				     NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		if (ret < 0 && ret != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		if (gen != dir_gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	/* check if the ref was overwritten by another ref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 			&ow_inode, &other_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	if (ret < 0 && ret != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		/* was never and will never be overwritten */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 			NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	if (ow_inode == ino && gen == ino_gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	 * We know that it is or will be overwritten. Check this now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	 * The current inode being processed might have been the one that caused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	 * inode 'ino' to be orphanized, therefore check if ow_inode matches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	 * the current inode being processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	if ((ow_inode < sctx->send_progress) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	     gen == sctx->cur_inode_gen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)  * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)  * that got overwritten. This is used by process_recorded_refs to determine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)  * if it has to use the path as returned by get_cur_path or the orphan name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	struct fs_path *name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	u64 dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	u64 dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	if (!sctx->parent_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	name = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 			name->start, fs_path_len(name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	fs_path_free(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)  * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)  * so we need to do some special handling in case we have clashes. This function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)  * takes care of this with the help of name_cache_entry::radix_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)  * In case of error, nce is kfreed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) static int name_cache_insert(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 			     struct name_cache_entry *nce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	struct list_head *nce_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	nce_head = radix_tree_lookup(&sctx->name_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 			(unsigned long)nce->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	if (!nce_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		if (!nce_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 			kfree(nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		INIT_LIST_HEAD(nce_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 			kfree(nce_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 			kfree(nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	list_add_tail(&nce->radix_list, nce_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	list_add_tail(&nce->list, &sctx->name_cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	sctx->name_cache_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) static void name_cache_delete(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 			      struct name_cache_entry *nce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	struct list_head *nce_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	nce_head = radix_tree_lookup(&sctx->name_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			(unsigned long)nce->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	if (!nce_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		btrfs_err(sctx->send_root->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 			nce->ino, sctx->name_cache_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	list_del(&nce->radix_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	list_del(&nce->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	sctx->name_cache_size--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	 * We may not get to the final release of nce_head if the lookup fails
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	if (nce_head && list_empty(nce_head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		kfree(nce_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 						    u64 ino, u64 gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	struct list_head *nce_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	struct name_cache_entry *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	if (!nce_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	list_for_each_entry(cur, nce_head, radix_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		if (cur->ino == ino && cur->gen == gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 			return cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)  * Removes the entry from the list and adds it back to the end. This marks the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)  * entry as recently used so that name_cache_clean_unused does not remove it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	list_del(&nce->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	list_add_tail(&nce->list, &sctx->name_cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)  * Remove some entries from the beginning of name_cache_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) static void name_cache_clean_unused(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	struct name_cache_entry *nce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		nce = list_entry(sctx->name_cache_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 				struct name_cache_entry, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		name_cache_delete(sctx, nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		kfree(nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) static void name_cache_free(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	struct name_cache_entry *nce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	while (!list_empty(&sctx->name_cache_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		nce = list_entry(sctx->name_cache_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 				struct name_cache_entry, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		name_cache_delete(sctx, nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		kfree(nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)  * Used by get_cur_path for each ref up to the root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)  * Returns 0 if it succeeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)  * Returns 1 if the inode is not existent or got overwritten. In that case, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)  * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)  * is returned, parent_ino/parent_gen are not guaranteed to be valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)  * Returns <0 in case of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) static int __get_cur_name_and_parent(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 				     u64 ino, u64 gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 				     u64 *parent_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 				     u64 *parent_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 				     struct fs_path *dest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	int nce_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	struct name_cache_entry *nce = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	 * First check if we already did a call to this function with the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	 * return the cached result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	nce = name_cache_search(sctx, ino, gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	if (nce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		if (ino < sctx->send_progress && nce->need_later_update) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 			name_cache_delete(sctx, nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 			kfree(nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 			nce = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 			name_cache_used(sctx, nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 			*parent_ino = nce->parent_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 			*parent_gen = nce->parent_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 			ret = fs_path_add(dest, nce->name, nce->name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 			ret = nce->ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	 * If the inode is not existent yet, add the orphan name and return 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	 * This should only happen for the parent dir that we determine in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	 * __record_new_ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	ret = is_inode_existent(sctx, ino, gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		ret = gen_unique_name(sctx, ino, gen, dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		goto out_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	 * Depending on whether the inode was already processed or not, use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	 * send_root or parent_root for ref lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	if (ino < sctx->send_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		ret = get_first_ref(sctx->send_root, ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 				    parent_ino, parent_gen, dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		ret = get_first_ref(sctx->parent_root, ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 				    parent_ino, parent_gen, dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	 * Check if the ref was overwritten by an inode's ref that was processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	 * earlier. If yes, treat as orphan and return 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 			dest->start, dest->end - dest->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		fs_path_reset(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		ret = gen_unique_name(sctx, ino, gen, dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) out_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	 * Store the result of the lookup in the name cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	if (!nce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	nce->ino = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	nce->gen = gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	nce->parent_ino = *parent_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	nce->parent_gen = *parent_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	nce->name_len = fs_path_len(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	nce->ret = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	strcpy(nce->name, dest->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	if (ino < sctx->send_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		nce->need_later_update = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		nce->need_later_update = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	nce_ret = name_cache_insert(sctx, nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	if (nce_ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		ret = nce_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	name_cache_clean_unused(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)  * Magic happens here. This function returns the first ref to an inode as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)  * would look like while receiving the stream at this point in time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)  * We walk the path up to the root. For every inode in between, we check if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)  * was already processed/sent. If yes, we continue with the parent as found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)  * in send_root. If not, we continue with the parent as found in parent_root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)  * If we encounter an inode that was deleted at this point in time, we use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)  * inodes "orphan" name instead of the real name and stop. Same with new inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)  * that were not created yet and overwritten inodes/refs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)  * When do we have orphan inodes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)  * 1. When an inode is freshly created and thus no valid refs are available yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)  * 2. When a directory lost all its refs (deleted) but still has dir items
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)  *    inside which were not processed yet (pending for move/delete). If anyone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)  *    tried to get the path to the dir items, it would get a path inside that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)  *    orphan directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)  * 3. When an inode is moved around or gets new links, it may overwrite the ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)  *    of an unprocessed inode. If in that case the first ref would be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)  *    overwritten, the overwritten inode gets "orphanized". Later when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)  *    process this overwritten inode, it is restored at a new place by moving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)  *    the orphan inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)  * sctx->send_progress tells this function at which point in time receiving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)  * would be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 			struct fs_path *dest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	struct fs_path *name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	u64 parent_inode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	u64 parent_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	int stop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	name = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	dest->reversed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	fs_path_reset(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		struct waiting_dir_move *wdm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		fs_path_reset(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		if (is_waiting_for_rm(sctx, ino, gen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 			ret = gen_unique_name(sctx, ino, gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 			ret = fs_path_add_path(dest, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		wdm = get_waiting_dir_move(sctx, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		if (wdm && wdm->orphanized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 			ret = gen_unique_name(sctx, ino, gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 			stop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		} else if (wdm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 			ret = get_first_ref(sctx->parent_root, ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 					    &parent_inode, &parent_gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 			ret = __get_cur_name_and_parent(sctx, ino, gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 							&parent_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 							&parent_gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 				stop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		ret = fs_path_add_path(dest, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		ino = parent_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		gen = parent_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	fs_path_free(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		fs_path_unreverse(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)  * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) static int send_subvol_begin(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	struct btrfs_root *send_root = sctx->send_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	struct btrfs_root *parent_root = sctx->parent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	struct btrfs_root_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	char *name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	int namelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	key.objectid = send_root->root_key.objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	key.type = BTRFS_ROOT_BACKREF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 				&key, path, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	    key.objectid != send_root->root_key.objectid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	namelen = btrfs_root_ref_name_len(leaf, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	if (parent_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 			    sctx->send_root->root_item.received_uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 			    sctx->send_root->root_item.uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		    le64_to_cpu(sctx->send_root->root_item.ctransid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	if (parent_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 		if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 				     parent_root->root_item.received_uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 				     parent_root->root_item.uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	ret = get_cur_path(sctx, ino, gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	ret = get_cur_path(sctx, ino, gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 		    ino, uid, gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	ret = get_cur_path(sctx, ino, gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	struct fs_path *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	struct btrfs_inode_item *ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	struct btrfs_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	btrfs_debug(fs_info, "send_utimes %llu", ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	key.objectid = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	key.type = BTRFS_INODE_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 		ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	ret = get_cur_path(sctx, ino, gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	/* TODO Add otime support when the otime patches get into upstream */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)  * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)  * a valid path yet because we did not process the refs yet. So, the inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)  * is created as orphan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) static int send_create_inode(struct send_ctx *sctx, u64 ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	int cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	u64 gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	u64 mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	u64 rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	btrfs_debug(fs_info, "send_create_inode %llu", ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	if (ino != sctx->cur_ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 				     NULL, NULL, &rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		gen = sctx->cur_inode_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		mode = sctx->cur_inode_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		rdev = sctx->cur_inode_rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	if (S_ISREG(mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		cmd = BTRFS_SEND_C_MKFILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	} else if (S_ISDIR(mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		cmd = BTRFS_SEND_C_MKDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	} else if (S_ISLNK(mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 		cmd = BTRFS_SEND_C_SYMLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 		cmd = BTRFS_SEND_C_MKNOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	} else if (S_ISFIFO(mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		cmd = BTRFS_SEND_C_MKFIFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	} else if (S_ISSOCK(mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 		cmd = BTRFS_SEND_C_MKSOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 		btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 				(int)(mode & S_IFMT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	ret = begin_cmd(sctx, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	ret = gen_unique_name(sctx, ino, gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	if (S_ISLNK(mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 		fs_path_reset(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 		ret = read_symlink(sctx->send_root, ino, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)  * We need some special handling for inodes that get processed before the parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)  * directory got created. See process_recorded_refs for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)  * This function does the check if we already created the dir out of order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) static int did_create_dir(struct send_ctx *sctx, u64 dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	struct btrfs_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	struct btrfs_key di_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	struct btrfs_dir_item *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	key.objectid = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	key.type = BTRFS_DIR_INDEX_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 		if (slot >= btrfs_header_nritems(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 			ret = btrfs_next_leaf(sctx->send_root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			} else if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 		btrfs_item_key_to_cpu(eb, &found_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 		if (found_key.objectid != key.objectid ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 		    found_key.type != key.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		btrfs_dir_item_key_to_cpu(eb, di, &di_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		    di_key.objectid < sctx->send_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 			ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 		path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)  * Only creates the inode if it is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)  * 1. Not a directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)  * 2. Or a directory which was not created already due to out of order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)  *    directories. See did_create_dir and process_recorded_refs for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) static int send_create_inode_if_needed(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	if (S_ISDIR(sctx->cur_inode_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 		ret = did_create_dir(sctx, sctx->cur_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	ret = send_create_inode(sctx, sctx->cur_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) struct recorded_ref {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	struct fs_path *full_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	u64 dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	u64 dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	int name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	ref->full_path = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	ref->name = (char *)kbasename(ref->full_path->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	ref->name_len = ref->full_path->end - ref->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)  * We need to process new refs before deleted refs, but compare_tree gives us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)  * everything mixed. So we first record all refs and later process them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)  * This function is a helper to record one ref.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) static int __record_ref(struct list_head *head, u64 dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 		      u64 dir_gen, struct fs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	struct recorded_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	ref = kmalloc(sizeof(*ref), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	if (!ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	ref->dir = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	ref->dir_gen = dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	set_ref_path(ref, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	list_add_tail(&ref->list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) static int dup_ref(struct recorded_ref *ref, struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	struct recorded_ref *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	new = kmalloc(sizeof(*ref), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	new->dir = ref->dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	new->dir_gen = ref->dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	new->full_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	INIT_LIST_HEAD(&new->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	list_add_tail(&new->list, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) static void __free_recorded_refs(struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	struct recorded_ref *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	while (!list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 		cur = list_entry(head->next, struct recorded_ref, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 		fs_path_free(cur->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 		list_del(&cur->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		kfree(cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) static void free_recorded_refs(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	__free_recorded_refs(&sctx->new_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	__free_recorded_refs(&sctx->deleted_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)  * Renames/moves a file/dir to its orphan name. Used when the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)  * ref of an unprocessed inode gets overwritten and for all non empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)  * directories.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 			  struct fs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	struct fs_path *orphan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	orphan = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	if (!orphan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	ret = gen_unique_name(sctx, ino, gen, orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	ret = send_rename(sctx, path, orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	fs_path_free(orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 						   u64 dir_ino, u64 dir_gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	struct rb_node **p = &sctx->orphan_dirs.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	struct rb_node *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	struct orphan_dir_info *entry, *odi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		parent = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 		entry = rb_entry(parent, struct orphan_dir_info, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 		if (dir_ino < entry->ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 			p = &(*p)->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		else if (dir_ino > entry->ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 			p = &(*p)->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 		else if (dir_gen < entry->gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 			p = &(*p)->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		else if (dir_gen > entry->gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 			p = &(*p)->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	odi = kmalloc(sizeof(*odi), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	if (!odi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	odi->ino = dir_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	odi->gen = dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	odi->last_dir_index_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	rb_link_node(&odi->node, parent, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	rb_insert_color(&odi->node, &sctx->orphan_dirs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	return odi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 						   u64 dir_ino, u64 gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	struct rb_node *n = sctx->orphan_dirs.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	struct orphan_dir_info *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	while (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 		entry = rb_entry(n, struct orphan_dir_info, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 		if (dir_ino < entry->ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 			n = n->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 		else if (dir_ino > entry->ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 			n = n->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		else if (gen < entry->gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 			n = n->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 		else if (gen > entry->gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 			n = n->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	return odi != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) static void free_orphan_dir_info(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 				 struct orphan_dir_info *odi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 	if (!odi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	rb_erase(&odi->node, &sctx->orphan_dirs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	kfree(odi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935)  * Returns 1 if a directory can be removed at this point in time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)  * We check this by iterating all dir items and checking if the inode behind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)  * the dir item was already processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 		     u64 send_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	struct btrfs_root *root = sctx->parent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	struct btrfs_key loc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	struct btrfs_dir_item *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	struct orphan_dir_info *odi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	 * Don't try to rmdir the top/root subvolume dir.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	if (dir == BTRFS_FIRST_FREE_OBJECTID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	key.objectid = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 	key.type = BTRFS_DIR_INDEX_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	odi = get_orphan_dir_info(sctx, dir, dir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	if (odi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 		key.offset = odi->last_dir_index_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		struct waiting_dir_move *dm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 			ret = btrfs_next_leaf(root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 			else if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 				      path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		if (found_key.objectid != key.objectid ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 		    found_key.type != key.type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 				struct btrfs_dir_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 		dm = get_waiting_dir_move(sctx, loc.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		if (dm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 			odi = add_orphan_dir_info(sctx, dir, dir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 			if (IS_ERR(odi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 				ret = PTR_ERR(odi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 			odi->gen = dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 			odi->last_dir_index_offset = found_key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 			dm->rmdir_ino = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 			dm->rmdir_gen = dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 		if (loc.objectid > send_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 			odi = add_orphan_dir_info(sctx, dir, dir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 			if (IS_ERR(odi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 				ret = PTR_ERR(odi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 			odi->gen = dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 			odi->last_dir_index_offset = found_key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 		path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	free_orphan_dir_info(sctx, odi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 	return entry != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	struct rb_node *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	struct waiting_dir_move *entry, *dm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	dm = kmalloc(sizeof(*dm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	if (!dm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	dm->ino = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	dm->rmdir_ino = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	dm->rmdir_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	dm->orphanized = orphanized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 		parent = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		entry = rb_entry(parent, struct waiting_dir_move, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 		if (ino < entry->ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 			p = &(*p)->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 		} else if (ino > entry->ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 			p = &(*p)->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 			kfree(dm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 			return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 	rb_link_node(&dm->node, parent, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) static struct waiting_dir_move *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	struct waiting_dir_move *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	while (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		entry = rb_entry(n, struct waiting_dir_move, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 		if (ino < entry->ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 			n = n->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 		else if (ino > entry->ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 			n = n->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) static void free_waiting_dir_move(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 				  struct waiting_dir_move *dm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 	if (!dm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	rb_erase(&dm->node, &sctx->waiting_dir_moves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 	kfree(dm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) static int add_pending_dir_move(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 				u64 ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 				u64 ino_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 				u64 parent_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 				struct list_head *new_refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 				struct list_head *deleted_refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 				const bool is_orphan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	struct rb_node *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	struct pending_dir_move *entry = NULL, *pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	struct recorded_ref *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	int exists = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	pm = kmalloc(sizeof(*pm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	if (!pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	pm->parent_ino = parent_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	pm->ino = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	pm->gen = ino_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	INIT_LIST_HEAD(&pm->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	INIT_LIST_HEAD(&pm->update_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	RB_CLEAR_NODE(&pm->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 	while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 		parent = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 		entry = rb_entry(parent, struct pending_dir_move, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 		if (parent_ino < entry->parent_ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 			p = &(*p)->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 		} else if (parent_ino > entry->parent_ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 			p = &(*p)->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 			exists = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	list_for_each_entry(cur, deleted_refs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 		ret = dup_ref(cur, &pm->update_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	list_for_each_entry(cur, new_refs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 		ret = dup_ref(cur, &pm->update_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	if (exists) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		list_add_tail(&pm->list, &entry->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 		rb_link_node(&pm->node, parent, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		__free_recorded_refs(&pm->update_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 		kfree(pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 						      u64 parent_ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	struct rb_node *n = sctx->pending_dir_moves.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	struct pending_dir_move *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	while (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 		entry = rb_entry(n, struct pending_dir_move, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 		if (parent_ino < entry->parent_ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 			n = n->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 		else if (parent_ino > entry->parent_ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 			n = n->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) static int path_loop(struct send_ctx *sctx, struct fs_path *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 		     u64 ino, u64 gen, u64 *ancestor_ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	u64 parent_inode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	u64 parent_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 	u64 start_ino = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 	*ancestor_ino = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	while (ino != BTRFS_FIRST_FREE_OBJECTID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 		fs_path_reset(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 		if (is_waiting_for_rm(sctx, ino, gen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 		if (is_waiting_for_move(sctx, ino)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 			if (*ancestor_ino == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 				*ancestor_ino = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 			ret = get_first_ref(sctx->parent_root, ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 					    &parent_inode, &parent_gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 			ret = __get_cur_name_and_parent(sctx, ino, gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 							&parent_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 							&parent_gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 			if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 		if (parent_inode == start_ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 			ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 			if (*ancestor_ino == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 				*ancestor_ino = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 		ino = parent_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 		gen = parent_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	struct fs_path *from_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	struct fs_path *to_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 	struct fs_path *name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	u64 orig_progress = sctx->send_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	struct recorded_ref *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	u64 parent_ino, parent_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 	struct waiting_dir_move *dm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	u64 rmdir_ino = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	u64 rmdir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	u64 ancestor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 	bool is_orphan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 	name = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	from_path = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 	if (!name || !from_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	dm = get_waiting_dir_move(sctx, pm->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	ASSERT(dm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	rmdir_ino = dm->rmdir_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 	rmdir_gen = dm->rmdir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 	is_orphan = dm->orphanized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	free_waiting_dir_move(sctx, dm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	if (is_orphan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 		ret = gen_unique_name(sctx, pm->ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 				      pm->gen, from_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 		ret = get_first_ref(sctx->parent_root, pm->ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 				    &parent_ino, &parent_gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 		ret = get_cur_path(sctx, parent_ino, parent_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 				   from_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 		ret = fs_path_add_path(from_path, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 	sctx->send_progress = sctx->cur_ino + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 	ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		LIST_HEAD(deleted_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 		ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 		ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 					   &pm->update_refs, &deleted_refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 					   is_orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 		if (rmdir_ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 			dm = get_waiting_dir_move(sctx, pm->ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 			ASSERT(dm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 			dm->rmdir_ino = rmdir_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 			dm->rmdir_gen = rmdir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 	fs_path_reset(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	to_path = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	ret = send_rename(sctx, from_path, to_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	if (rmdir_ino) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 		struct orphan_dir_info *odi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 		u64 gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 		odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 		if (!odi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 			/* already deleted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 			goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 		gen = odi->gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 			goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 		name = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 		if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 		ret = get_cur_path(sctx, rmdir_ino, gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 		ret = send_rmdir(sctx, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 	ret = send_utimes(sctx, pm->ino, pm->gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	 * After rename/move, need to update the utimes of both new parent(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 	 * and old parent(s).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 	list_for_each_entry(cur, &pm->update_refs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 		 * The parent inode might have been deleted in the send snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 		ret = get_inode_info(sctx->send_root, cur->dir, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 				     NULL, NULL, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 		if (ret == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	fs_path_free(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 	fs_path_free(from_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	fs_path_free(to_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	sctx->send_progress = orig_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 	if (!list_empty(&m->list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 		list_del(&m->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	if (!RB_EMPTY_NODE(&m->node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 		rb_erase(&m->node, &sctx->pending_dir_moves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	__free_recorded_refs(&m->update_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 	kfree(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) static void tail_append_pending_moves(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 				      struct pending_dir_move *moves,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 				      struct list_head *stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	if (list_empty(&moves->list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 		list_add_tail(&moves->list, stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 		LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 		list_splice_init(&moves->list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 		list_add_tail(&moves->list, stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 		list_splice_tail(&list, stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 	if (!RB_EMPTY_NODE(&moves->node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 		rb_erase(&moves->node, &sctx->pending_dir_moves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 		RB_CLEAR_NODE(&moves->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) static int apply_children_dir_moves(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 	struct pending_dir_move *pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	struct list_head stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	u64 parent_ino = sctx->cur_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 	pm = get_pending_dir_moves(sctx, parent_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	if (!pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	INIT_LIST_HEAD(&stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 	tail_append_pending_moves(sctx, pm, &stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	while (!list_empty(&stack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 		pm = list_first_entry(&stack, struct pending_dir_move, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 		parent_ino = pm->ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 		ret = apply_dir_move(sctx, pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 		free_pending_move(sctx, pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 		pm = get_pending_dir_moves(sctx, parent_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 		if (pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 			tail_append_pending_moves(sctx, pm, &stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 	while (!list_empty(&stack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		pm = list_first_entry(&stack, struct pending_dir_move, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 		free_pending_move(sctx, pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)  * We might need to delay a directory rename even when no ancestor directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434)  * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)  * renamed. This happens when we rename a directory to the old name (the name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)  * in the parent root) of some other unrelated directory that got its rename
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437)  * delayed due to some ancestor with higher number that got renamed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439)  * Example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)  * Parent snapshot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442)  * .                                       (ino 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)  * |---- a/                                (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)  * |     |---- file                        (ino 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445)  * |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)  * |---- b/                                (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447)  * |---- c/                                (ino 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)  * Send snapshot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450)  * .                                       (ino 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)  * |---- a/                                (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452)  * |---- x/                                (ino 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)  *       |---- y/                          (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)  *             |----- file                 (ino 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456)  * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457)  * from 'a' to 'x/y' happening first, which in turn depends on the rename of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)  * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459)  * must issue is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461)  * 1 - rename 259 from 'c' to 'x'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)  * 2 - rename 257 from 'a' to 'x/y'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463)  * 3 - rename 258 from 'b' to 'a'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)  * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466)  * be done right away and < 0 on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) static int wait_for_dest_dir_move(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 				  struct recorded_ref *parent_ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 				  const bool is_orphan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	struct btrfs_key di_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 	struct btrfs_dir_item *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 	u64 left_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 	u64 right_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	struct waiting_dir_move *wdm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	key.objectid = parent_ref->dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	key.type = BTRFS_DIR_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 	key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 	} else if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 				       parent_ref->name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	if (!di) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 	 * di_key.objectid has the number of the inode that has a dentry in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	 * parent directory with the same name that sctx->cur_ino is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	 * renamed to. We need to check if that inode is in the send root as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	 * well and if it is currently marked as an inode with a pending rename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	 * if it is, we need to delay the rename of sctx->cur_ino as well, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	 * that it happens after that other inode is renamed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 	if (di_key.type != BTRFS_INODE_ITEM_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 			     &left_gen, NULL, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 			     &right_gen, NULL, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 		if (ret == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	/* Different inode, no need to delay the rename of sctx->cur_ino */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	if (right_gen != left_gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	wdm = get_waiting_dir_move(sctx, di_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	if (wdm && !wdm->orphanized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 		ret = add_pending_dir_move(sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 					   sctx->cur_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 					   sctx->cur_inode_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 					   di_key.objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 					   &sctx->new_refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 					   &sctx->deleted_refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 					   is_orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 			ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557)  * Check if inode ino2, or any of its ancestors, is inode ino1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)  * Return 1 if true, 0 if false and < 0 on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) static int check_ino_in_path(struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 			     const u64 ino1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 			     const u64 ino1_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 			     const u64 ino2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 			     const u64 ino2_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 			     struct fs_path *fs_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 	u64 ino = ino2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	if (ino1 == ino2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 		return ino1_gen == ino2_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 		u64 parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 		u64 parent_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 		fs_path_reset(fs_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 		ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 		if (parent == ino1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 			return parent_gen == ino1_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 		ino = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589)  * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590)  * possible path (in case ino2 is not a directory and has multiple hard links).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591)  * Return 1 if true, 0 if false and < 0 on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) static int is_ancestor(struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 		       const u64 ino1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 		       const u64 ino1_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 		       const u64 ino2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 		       struct fs_path *fs_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	bool free_fs_path = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	struct btrfs_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 	if (!fs_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 		fs_path = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 		if (!fs_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 		free_fs_path = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	key.objectid = ino2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 	key.type = BTRFS_INODE_REF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 		struct extent_buffer *leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 		int slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 		u32 cur_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 		u32 item_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 		if (slot >= btrfs_header_nritems(leaf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 			ret = btrfs_next_leaf(root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 			if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 		btrfs_item_key_to_cpu(leaf, &key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 		if (key.objectid != ino2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 		if (key.type != BTRFS_INODE_REF_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 		    key.type != BTRFS_INODE_EXTREF_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 		item_size = btrfs_item_size_nr(leaf, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 		while (cur_offset < item_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 			u64 parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 			u64 parent_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 			if (key.type == BTRFS_INODE_EXTREF_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 				unsigned long ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 				struct btrfs_inode_extref *extref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 				ptr = btrfs_item_ptr_offset(leaf, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 				extref = (struct btrfs_inode_extref *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 					(ptr + cur_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 				parent = btrfs_inode_extref_parent(leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 								   extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 				cur_offset += sizeof(*extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 				cur_offset += btrfs_inode_extref_name_len(leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 								  extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 				parent = key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 				cur_offset = item_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 			ret = get_inode_info(root, parent, NULL, &parent_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 					     NULL, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 			ret = check_ino_in_path(root, ino1, ino1_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 						parent, parent_gen, fs_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 		path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 	if (free_fs_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 		fs_path_free(fs_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) static int wait_for_parent_move(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 				struct recorded_ref *parent_ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 				const bool is_orphan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 	u64 ino = parent_ref->dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 	u64 ino_gen = parent_ref->dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 	u64 parent_ino_before, parent_ino_after;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 	struct fs_path *path_before = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 	struct fs_path *path_after = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 	int len1, len2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 	path_after = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	path_before = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 	if (!path_after || !path_before) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 	 * Our current directory inode may not yet be renamed/moved because some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 	 * ancestor (immediate or not) has to be renamed/moved first. So find if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 	 * such ancestor exists and make sure our own rename/move happens after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	 * that ancestor is processed to avoid path build infinite loops (done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	 * at get_cur_path()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 		u64 parent_ino_after_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 		if (is_waiting_for_move(sctx, ino)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 			 * If the current inode is an ancestor of ino in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 			 * parent root, we need to delay the rename of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 			 * current inode, otherwise don't delayed the rename
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 			 * because we can end up with a circular dependency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 			 * of renames, resulting in some directories never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 			 * getting the respective rename operations issued in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 			 * the send stream or getting into infinite path build
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 			 * loops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 			ret = is_ancestor(sctx->parent_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 					  sctx->cur_ino, sctx->cur_inode_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 					  ino, path_before);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 		fs_path_reset(path_before);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 		fs_path_reset(path_after);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 		ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 				    &parent_ino_after_gen, path_after);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 		ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 				    NULL, path_before);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 		if (ret < 0 && ret != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 		} else if (ret == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 		len1 = fs_path_len(path_before);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 		len2 = fs_path_len(path_after);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 		if (ino > sctx->cur_ino &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 		    (parent_ino_before != parent_ino_after || len1 != len2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 		     memcmp(path_before->start, path_after->start, len1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 			u64 parent_ino_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 			ret = get_inode_info(sctx->parent_root, ino, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 					     &parent_ino_gen, NULL, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 					     NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 			if (ino_gen == parent_ino_gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 				ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 		ino = parent_ino_after;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 		ino_gen = parent_ino_after_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 	fs_path_free(path_before);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 	fs_path_free(path_after);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 	if (ret == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 		ret = add_pending_dir_move(sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 					   sctx->cur_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 					   sctx->cur_inode_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 					   ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 					   &sctx->new_refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 					   &sctx->deleted_refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 					   is_orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 			ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 	struct fs_path *new_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 	 * Our reference's name member points to its full_path member string, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 	 * we use here a new path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 	new_path = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 	if (!new_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 		fs_path_free(new_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 	ret = fs_path_add(new_path, ref->name, ref->name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 		fs_path_free(new_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	fs_path_free(ref->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 	set_ref_path(ref, new_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822)  * When processing the new references for an inode we may orphanize an existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823)  * directory inode because its old name conflicts with one of the new references
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824)  * of the current inode. Later, when processing another new reference of our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825)  * inode, we might need to orphanize another inode, but the path we have in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826)  * reference reflects the pre-orphanization name of the directory we previously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827)  * orphanized. For example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829)  * parent snapshot looks like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)  * .                                     (ino 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)  * |----- f1                             (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)  * |----- f2                             (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834)  * |----- d1/                            (ino 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835)  *        |----- d2/                     (ino 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837)  * send snapshot looks like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839)  * .                                     (ino 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)  * |----- d1                             (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841)  * |----- f2/                            (ino 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842)  *        |----- f2_link/                (ino 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)  *        |       |----- f1              (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844)  *        |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845)  *        |----- d2                      (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847)  * When processing inode 257 we compute the name for inode 259 as "d1", and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848)  * cache it in the name cache. Later when we start processing inode 258, when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849)  * collecting all its new references we set a full path of "d1/d2" for its new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850)  * reference with name "d2". When we start processing the new references we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851)  * start by processing the new reference with name "d1", and this results in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852)  * orphanizing inode 259, since its old reference causes a conflict. Then we
 * move on to the next new reference, with name "d2", and we find out we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854)  * orphanize inode 260, as its old reference conflicts with ours - but for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855)  * orphanization we use a source path corresponding to the path we stored in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)  * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857)  * receiver fail since the path component "d1/" no longer exists, it was renamed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858)  * to "o259-6-0/" when processing the previous new reference. So in this case we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)  * must recompute the path in the new reference and use it for the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)  * orphanization operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 	char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 	name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 	fs_path_reset(ref->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 	ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 	ret = fs_path_add(ref->full_path, name, ref->name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 	/* Update the reference's base name pointer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 	set_ref_path(ref, ref->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 	kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888)  * This does all the move/link/unlink/rmdir magic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 	struct recorded_ref *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 	struct recorded_ref *cur2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 	struct list_head check_dirs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 	struct fs_path *valid_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 	u64 ow_inode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 	u64 ow_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 	u64 ow_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 	int did_overwrite = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 	int is_orphan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 	u64 last_dir_ino_rm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 	bool can_rename = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 	bool orphanized_dir = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 	bool orphanized_ancestor = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 	btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 	 * This should never happen as the root dir always has the same ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 	 * which is always '..'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 	INIT_LIST_HEAD(&check_dirs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 	valid_path = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 	if (!valid_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 	 * First, check if the first ref of the current inode was overwritten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	 * before. If yes, we know that the current inode was already orphanized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 	 * and thus use the orphan name. If not, we can use get_cur_path to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 	 * get the path of the first ref as it would like while receiving at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 	 * this point in time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 	 * New inodes are always orphan at the beginning, so force to use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 	 * orphan name in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 	 * The first ref is stored in valid_path and will be updated if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 	 * gets moved around.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 	if (!sctx->cur_inode_new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 				sctx->cur_inode_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 			did_overwrite = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 	if (sctx->cur_inode_new || did_overwrite) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 		ret = gen_unique_name(sctx, sctx->cur_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 				sctx->cur_inode_gen, valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 		is_orphan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 				valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 	 * Before doing any rename and link operations, do a first pass on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 	 * new references to orphanize any unprocessed inodes that may have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	 * reference that conflicts with one of the new references of the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 	 * inode. This needs to happen first because a new reference may conflict
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 	 * with the old reference of a parent directory, so we must make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 	 * that the path used for link and rename commands don't use an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 	 * orphanized name when an ancestor was not yet orphanized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 	 * Example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	 * Parent snapshot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 	 * .                                                      (ino 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 	 * |----- testdir/                                        (ino 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 	 * |          |----- a                                    (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 	 * |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 	 * |----- b                                               (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 	 * Send snapshot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 	 * .                                                      (ino 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 	 * |----- testdir_2/                                      (ino 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 	 * |          |----- a                                    (ino 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 	 * |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	 * |----- testdir                                         (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	 * |----- b                                               (ino 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 	 * |----- b2                                              (ino 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 	 * Processing the new reference for inode 257 with name "b" may happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 	 * before processing the new reference with name "testdir". If so, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 	 * must make sure that by the time we send a link command to create the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 	 * hard link "b", inode 259 was already orphanized, since the generated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 	 * path in "valid_path" already contains the orphanized name for 259.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 	 * We are processing inode 257, so only later when processing 259 we do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 	 * the rename operation to change its temporary (orphanized) name to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 	 * "testdir_2".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 	list_for_each_entry(cur, &sctx->new_refs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 		if (ret == inode_state_will_create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 		 * Check if this new ref would overwrite the first ref of another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 		 * unprocessed inode. If yes, orphanize the overwritten inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 		 * If we find an overwritten ref that is not the first ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 		 * simply unlink it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 				cur->name, cur->name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 				&ow_inode, &ow_gen, &ow_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 			ret = is_first_ref(sctx->parent_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 					   ow_inode, cur->dir, cur->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 					   cur->name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 				struct name_cache_entry *nce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 				struct waiting_dir_move *wdm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 				if (orphanized_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 					ret = refresh_ref_path(sctx, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 					if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 						goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 				ret = orphanize_inode(sctx, ow_inode, ow_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 						cur->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 				if (S_ISDIR(ow_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 					orphanized_dir = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 				 * If ow_inode has its rename operation delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 				 * make sure that its orphanized name is used in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 				 * the source path when performing its rename
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 				 * operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 				if (is_waiting_for_move(sctx, ow_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 					wdm = get_waiting_dir_move(sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 								   ow_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 					ASSERT(wdm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 					wdm->orphanized = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 				 * Make sure we clear our orphanized inode's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 				 * name from the name cache. This is because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 				 * inode ow_inode might be an ancestor of some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 				 * other inode that will be orphanized as well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 				 * later and has an inode number greater than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 				 * sctx->send_progress. We need to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 				 * future name lookups from using the old name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 				 * and get instead the orphan name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 				nce = name_cache_search(sctx, ow_inode, ow_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 				if (nce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 					name_cache_delete(sctx, nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 					kfree(nce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 				 * ow_inode might currently be an ancestor of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 				 * cur_ino, therefore compute valid_path (the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 				 * current path of cur_ino) again because it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 				 * might contain the pre-orphanization name of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 				 * ow_inode, which is no longer valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 				ret = is_ancestor(sctx->parent_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 						  ow_inode, ow_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 						  sctx->cur_ino, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 				if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 					orphanized_ancestor = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 					fs_path_reset(valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 					ret = get_cur_path(sctx, sctx->cur_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 							   sctx->cur_inode_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 							   valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 				 * If we previously orphanized a directory that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 				 * collided with a new reference that we already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 				 * processed, recompute the current path because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 				 * that directory may be part of the path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 				if (orphanized_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 					ret = refresh_ref_path(sctx, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 					if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 						goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 				ret = send_unlink(sctx, cur->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 	list_for_each_entry(cur, &sctx->new_refs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 		 * We may have refs where the parent directory does not exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 		 * yet. This happens if the parent directories inum is higher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 		 * than the current inum. To handle this case, we create the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 		 * parent directory out of order. But we need to check if this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 		 * did already happen before due to other refs in the same dir.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 		if (ret == inode_state_will_create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 			 * First check if any of the current inodes refs did
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 			 * already create the dir.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 			list_for_each_entry(cur2, &sctx->new_refs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 				if (cur == cur2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 				if (cur2->dir == cur->dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 					ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 			 * If that did not happen, check if a previous inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 			 * did already create the dir.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 			if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 				ret = did_create_dir(sctx, cur->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 			if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 				ret = send_create_inode(sctx, cur->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 			if (ret == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 				can_rename = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 				*pending_move = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 		    can_rename) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 			ret = wait_for_parent_move(sctx, cur, is_orphan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 			if (ret == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 				can_rename = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 				*pending_move = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 		 * link/move the ref to the new place. If we have an orphan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 		 * inode, move it and update valid_path. If not, link or move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 		 * it depending on the inode mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 		if (is_orphan && can_rename) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 			ret = send_rename(sctx, valid_path, cur->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 			is_orphan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 			ret = fs_path_copy(valid_path, cur->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 		} else if (can_rename) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 			if (S_ISDIR(sctx->cur_inode_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 				 * Dirs can't be linked, so move it. For moved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 				 * dirs, we always have one new and one deleted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 				 * ref. The deleted ref is ignored later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 				ret = send_rename(sctx, valid_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 						  cur->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 				if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 					ret = fs_path_copy(valid_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 							   cur->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 				 * We might have previously orphanized an inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 				 * which is an ancestor of our current inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 				 * so our reference's full path, which was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 				 * computed before any such orphanizations, must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 				 * be updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 				if (orphanized_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 					ret = update_ref_path(sctx, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 					if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 						goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 				ret = send_link(sctx, cur->full_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 						valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 		ret = dup_ref(cur, &check_dirs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 		 * Check if we can already rmdir the directory. If not,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 		 * orphanize it. For every dir item inside that gets deleted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 		 * later, we do this check again and rmdir it then if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 		 * See the use of check_dirs for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 				sctx->cur_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 			ret = send_rmdir(sctx, valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 		} else if (!is_orphan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 			ret = orphanize_inode(sctx, sctx->cur_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 					sctx->cur_inode_gen, valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 			is_orphan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 		list_for_each_entry(cur, &sctx->deleted_refs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 			ret = dup_ref(cur, &check_dirs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 	} else if (S_ISDIR(sctx->cur_inode_mode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 		   !list_empty(&sctx->deleted_refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 		 * We have a moved dir. Add the old parent to check_dirs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 				list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) 		ret = dup_ref(cur, &check_dirs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 		 * We have a non dir inode. Go through all deleted refs and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 		 * unlink them if they were not already overwritten by other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 		 * inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 		list_for_each_entry(cur, &sctx->deleted_refs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 					sctx->cur_ino, sctx->cur_inode_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 					cur->name, cur->name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 			if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 				 * If we orphanized any ancestor before, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 				 * to recompute the full path for deleted names,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 				 * since any such path was computed before we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 				 * processed any references and orphanized any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 				 * ancestor inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 				if (orphanized_ancestor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 					ret = update_ref_path(sctx, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 					if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 						goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 				ret = send_unlink(sctx, cur->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 			ret = dup_ref(cur, &check_dirs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 		 * If the inode is still orphan, unlink the orphan. This may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 		 * happen when a previous inode did overwrite the first ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 		 * of this inode and no new refs were added for the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 		 * inode. Unlinking does not mean that the inode is deleted in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 		 * all cases. There may still be links to this inode in other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 		 * places.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 		if (is_orphan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 			ret = send_unlink(sctx, valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 	 * We did collect all parent dirs where cur_inode was once located. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 	 * now go through all these dirs and check if they are pending for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 	 * deletion and if it's finally possible to perform the rmdir now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 	 * We also update the inode stats of the parent dirs here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 	list_for_each_entry(cur, &check_dirs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 		 * In case we had refs into dirs that were not processed yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 		 * we don't need to do the utime and rmdir logic for these dirs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 		 * The dir will be processed later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 		if (cur->dir > sctx->cur_ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 		if (ret == inode_state_did_create ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 		    ret == inode_state_no_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 			/* TODO delayed utimes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 		} else if (ret == inode_state_did_delete &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 			   cur->dir != last_dir_ino_rm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 					sctx->cur_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 				ret = get_cur_path(sctx, cur->dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 						   cur->dir_gen, valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 				ret = send_rmdir(sctx, valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 				last_dir_ino_rm = cur->dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 	__free_recorded_refs(&check_dirs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 	free_recorded_refs(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 	fs_path_free(valid_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 		      void *ctx, struct list_head *refs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 	struct send_ctx *sctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 	u64 gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 	ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 			NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) 	ret = get_cur_path(sctx, dir, gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 	ret = fs_path_add_path(p, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 	ret = __record_ref(refs, dir, gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) 		fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) static int __record_new_ref(int num, u64 dir, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 			    struct fs_path *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 			    void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 	struct send_ctx *sctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 	return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) static int __record_deleted_ref(int num, u64 dir, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 				struct fs_path *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 				void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 	struct send_ctx *sctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 	return record_ref(sctx->parent_root, dir, name, ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 			  &sctx->deleted_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) static int record_new_ref(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 				sctx->cmp_key, 0, __record_new_ref, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) static int record_deleted_ref(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 				sctx->cmp_key, 0, __record_deleted_ref, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) struct find_ref_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 	u64 dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 	u64 dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 	struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 	struct fs_path *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 	int found_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) static int __find_iref(int num, u64 dir, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 		       struct fs_path *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 		       void *ctx_)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 	struct find_ref_ctx *ctx = ctx_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 	u64 dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 	if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 	    strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 		 * To avoid doing extra lookups we'll only do this if everything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 		 * else matches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 		ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 				     NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) 		if (dir_gen != ctx->dir_gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 		ctx->found_idx = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) static int find_iref(struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 		     struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 		     struct btrfs_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 		     u64 dir, u64 dir_gen, struct fs_path *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 	struct find_ref_ctx ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 	ctx.dir = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 	ctx.name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 	ctx.dir_gen = dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 	ctx.found_idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 	ctx.root = root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 	ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 	if (ctx.found_idx == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) 	return ctx.found_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) static int __record_changed_new_ref(int num, u64 dir, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 				    struct fs_path *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 				    void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 	u64 dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 	struct send_ctx *sctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) 	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) 			     NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) 	ret = find_iref(sctx->parent_root, sctx->right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) 			sctx->cmp_key, dir, dir_gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 	if (ret == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 		ret = __record_new_ref(num, dir, index, name, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 	else if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) static int __record_changed_deleted_ref(int num, u64 dir, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) 					struct fs_path *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) 					void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 	u64 dir_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) 	struct send_ctx *sctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) 	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 			     NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) 	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 			dir, dir_gen, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) 	if (ret == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) 		ret = __record_deleted_ref(num, dir, index, name, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 	else if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) static int record_changed_ref(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) 	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) 			sctx->cmp_key, 0, __record_changed_new_ref, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) 	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 			sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555)  * Record and process all refs at once. Needed when an inode changes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556)  * generation number, which means that it was deleted and recreated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) static int process_all_refs(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 			    enum btrfs_compare_tree_result cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 	struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) 	struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 	struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) 	int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) 	iterate_inode_ref_t cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) 	int pending_move = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 	if (cmd == BTRFS_COMPARE_TREE_NEW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) 		root = sctx->send_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 		cb = __record_new_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 		root = sctx->parent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 		cb = __record_deleted_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 		btrfs_err(sctx->send_root->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 				"Wrong command %d in process_all_refs", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 	key.objectid = sctx->cmp_key->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 	key.type = BTRFS_INODE_REF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 		eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) 		slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 		if (slot >= btrfs_header_nritems(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) 			ret = btrfs_next_leaf(root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) 			else if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 		btrfs_item_key_to_cpu(eb, &found_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) 		if (found_key.objectid != key.objectid ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) 		    (found_key.type != BTRFS_INODE_REF_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) 		     found_key.type != BTRFS_INODE_EXTREF_KEY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) 		path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 	btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 	 * We don't actually care about pending_move as we are simply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 	 * re-creating this inode and will be rename'ing it into place once we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) 	 * rename the parent directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) 	ret = process_recorded_refs(sctx, &pending_move);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) static int send_set_xattr(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) 			  struct fs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) 			  const char *name, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) 			  const char *data, int data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) 	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) 	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) 	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) static int send_remove_xattr(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) 			  struct fs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) 			  const char *name, int name_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) 	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) 	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) static int __process_new_xattr(int num, struct btrfs_key *di_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) 			       const char *name, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) 			       const char *data, int data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) 			       u8 type, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) 	struct send_ctx *sctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) 	struct posix_acl_xattr_header dummy_acl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 	/* Capabilities are emitted by finish_inode_if_needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 	if (!strncmp(name, XATTR_NAME_CAPS, name_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 	 * This hack is needed because empty acls are stored as zero byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 	 * data in xattrs. Problem with that is, that receiving these zero byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 	 * acls will fail later. To fix this, we send a dummy acl list that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) 	 * only contains the version number and no entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) 	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) 	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) 		if (data_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) 			dummy_acl.a_version =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) 					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) 			data = (char *)&dummy_acl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) 			data_len = sizeof(dummy_acl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) 	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 	fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) 				   const char *name, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) 				   const char *data, int data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) 				   u8 type, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) 	struct send_ctx *sctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) 	ret = send_remove_xattr(sctx, p, name, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) 	fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) static int process_new_xattr(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) 	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) 			       __process_new_xattr, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) static int process_deleted_xattr(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) 	return iterate_dir_item(sctx->parent_root, sctx->right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) 				__process_deleted_xattr, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) struct find_xattr_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) 	int name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) 	int found_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) 	char *found_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) 	int found_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) static int __find_xattr(int num, struct btrfs_key *di_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) 			const char *name, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) 			const char *data, int data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) 			u8 type, void *vctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) 	struct find_xattr_ctx *ctx = vctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) 	if (name_len == ctx->name_len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) 	    strncmp(name, ctx->name, name_len) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) 		ctx->found_idx = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) 		ctx->found_data_len = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) 		ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) 		if (!ctx->found_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) static int find_xattr(struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) 		      struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) 		      struct btrfs_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) 		      const char *name, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) 		      char **data, int *data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) 	struct find_xattr_ctx ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) 	ctx.name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) 	ctx.name_len = name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) 	ctx.found_idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) 	ctx.found_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) 	ctx.found_data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) 	ret = iterate_dir_item(root, path, __find_xattr, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) 	if (ctx.found_idx == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) 	if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) 		*data = ctx.found_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) 		*data_len = ctx.found_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) 		kfree(ctx.found_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) 	return ctx.found_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) 				       const char *name, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) 				       const char *data, int data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) 				       u8 type, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) 	struct send_ctx *sctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) 	char *found_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) 	int found_data_len  = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) 	ret = find_xattr(sctx->parent_root, sctx->right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) 			 sctx->cmp_key, name, name_len, &found_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) 			 &found_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) 	if (ret == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) 		ret = __process_new_xattr(num, di_key, name, name_len, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) 				data_len, type, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) 	} else if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) 		if (data_len != found_data_len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) 		    memcmp(data, found_data, data_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) 			ret = __process_new_xattr(num, di_key, name, name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) 					data, data_len, type, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) 	kfree(found_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) 					   const char *name, int name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) 					   const char *data, int data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) 					   u8 type, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) 	struct send_ctx *sctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) 	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) 			 name, name_len, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) 	if (ret == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) 		ret = __process_deleted_xattr(num, di_key, name, name_len, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) 				data_len, type, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) 	else if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) static int process_changed_xattr(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) 	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) 			__process_changed_new_xattr, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) 	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) 			__process_changed_deleted_xattr, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) static int process_all_new_xattrs(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) 	struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) 	struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) 	struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) 	int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) 	root = sctx->send_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) 	key.objectid = sctx->cmp_key->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) 	key.type = BTRFS_XATTR_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) 		eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) 		slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) 		if (slot >= btrfs_header_nritems(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) 			ret = btrfs_next_leaf(root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) 			} else if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) 		btrfs_item_key_to_cpu(eb, &found_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) 		if (found_key.objectid != key.objectid ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) 		    found_key.type != key.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) 		ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) 		path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) static inline u64 max_send_read_size(const struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) 	return sctx->send_max_size - SZ_16K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) static int put_data_header(struct send_ctx *sctx, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) 	struct btrfs_tlv_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) 	if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) 		return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) 	hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) 	put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) 	put_unaligned_le16(len, &hdr->tlv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) 	sctx->send_size += sizeof(*hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) 
/*
 * Copy @len bytes of data from the inode currently being processed
 * (sctx->cur_ino), starting at file offset @offset, into the send buffer,
 * preceded by a BTRFS_SEND_A_DATA tlv header.  Data is read through the page
 * cache, with readahead over the remaining range to speed up sequential
 * sends.
 *
 * Returns 0 on success, or a negative errno: -EOVERFLOW if the data does not
 * fit in the send buffer, -ENOMEM on page allocation failure, -EIO if a page
 * fails to become uptodate, or an error from btrfs_iget().
 */
static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct page *page;
	char *addr;
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t last_index;
	unsigned pg_offset = offset_in_page(offset);
	int ret;

	/* Reserve the data tlv header in the send buffer before copying. */
	ret = put_data_header(sctx, len);
	if (ret)
		return ret;

	inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	last_index = (offset + len - 1) >> PAGE_SHIFT;

	/* initial readahead */
	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
	file_ra_state_init(&sctx->ra, inode->i_mapping);

	/*
	 * Copy page by page; only the first page may start at a non-zero
	 * offset within the page (pg_offset is reset to 0 after it).
	 *
	 * NOTE(review): if len == 0 the loop may not execute and ret is
	 * returned uninitialized — presumably callers always pass len > 0;
	 * confirm.
	 */
	while (index <= last_index) {
		unsigned cur_len = min_t(unsigned, len,
					 PAGE_SIZE - pg_offset);

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			/*
			 * Page not cached yet: kick off synchronous readahead
			 * for the rest of the range, then allocate the page
			 * ourselves.
			 */
			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
				NULL, index, last_index + 1 - index);

			page = find_or_create_page(inode->i_mapping, index,
					GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		/* Hit the readahead marker: keep the readahead window going. */
		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
				NULL, page, index, last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			/*
			 * Issue the read (the read path unlocks the page on
			 * completion), then re-lock and re-check uptodate;
			 * still not uptodate means the read failed.
			 */
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				btrfs_err(fs_info,
			"send: IO error at offset %llu for inode %llu root %llu",
					page_offset(page), sctx->cur_ino,
					sctx->send_root->root_key.objectid);
				put_page(page);
				ret = -EIO;
				break;
			}
		}

		/* Copy this page's chunk into the send buffer and advance. */
		addr = kmap(page);
		memcpy(sctx->send_buf + sctx->send_size, addr + pg_offset,
		       cur_len);
		kunmap(page);
		unlock_page(page);
		put_page(page);
		index++;
		pg_offset = 0;
		len -= cur_len;
		sctx->send_size += cur_len;
	}
	iput(inode);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035)  * Read some bytes from the current inode/file and send a write command to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036)  * user space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) 	btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) 	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) 	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) 	ret = put_file_data(sctx, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) 	fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) 
/*
 * Send a clone command to user space, asking the receiver to clone @len bytes
 * from the clone source (inode clone_root->ino at clone_root->offset in
 * clone_root->root) into the inode currently being processed, at file offset
 * @offset.
 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)
{
	int ret = 0;
	struct fs_path *p;
	u64 gen;

	btrfs_debug(sctx->send_root->fs_info,
		    "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
		    offset, len, clone_root->root->root_key.objectid,
		    clone_root->ino, clone_root->offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
	if (ret < 0)
		goto out;

	/* Destination: path of the inode currently being processed. */
	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	/*
	 * Build the clone source's path (reusing @p).  When cloning from the
	 * send root itself, use the current send-time path, which needs the
	 * source inode's generation; otherwise resolve the inode's path within
	 * the source root.
	 */
	if (clone_root->root == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				&gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(clone_root->root, clone_root->ino, p);
	}
	if (ret < 0)
		goto out;

	/*
	 * If the parent we're using has a received_uuid set then use that as
	 * our clone source as that is what we will look for when doing a
	 * receive.
	 *
	 * This covers the case that we create a snapshot off of a received
	 * subvolume and then use that as the parent and try to receive on a
	 * different host.
	 */
	if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
		    le64_to_cpu(clone_root->root->root_item.ctransid));
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
			clone_root->offset);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146)  * Send an update extent command to user space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) static int send_update_extent(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) 			      u64 offset, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) 	struct fs_path *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) 	p = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) 	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) 	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) 	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) 	ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) tlv_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) 	fs_path_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) 
/*
 * Send zero-filled write commands covering the hole that starts at the end of
 * the last processed extent (sctx->cur_inode_last_extent) and ends at @end,
 * clamped to the inode's i_size.  If the stream was started with
 * BTRFS_SEND_FLAG_NO_FILE_DATA, a single update extent command is emitted
 * instead of the zeroed writes.
 *
 * Returns 0 on success or a negative errno.
 */
static int send_hole(struct send_ctx *sctx, u64 end)
{
	struct fs_path *p = NULL;
	u64 read_size = max_send_read_size(sctx);
	u64 offset = sctx->cur_inode_last_extent;
	int ret = 0;

	/*
	 * A hole that starts at EOF or beyond it. Since we do not yet support
	 * fallocate (for extent preallocation and hole punching), sending a
	 * write of zeroes starting at EOF or beyond would later require issuing
	 * a truncate operation which would undo the write and achieve nothing.
	 */
	if (offset >= sctx->cur_inode_size)
		return 0;

	/*
	 * Don't go beyond the inode's i_size due to prealloc extents that start
	 * after the i_size.
	 */
	end = min_t(u64, end, sctx->cur_inode_size);

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, end - offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
	/*
	 * Cover the hole with write commands of at most read_size zeroed
	 * bytes each, one command per iteration.
	 */
	while (offset < end) {
		u64 len = min(end - offset, read_size);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		ret = put_data_header(sctx, len);
		if (ret < 0)
			break;
		/* Zero-filled payload; no file data is actually read. */
		memset(sctx->send_buf + sctx->send_size, 0, len);
		sctx->send_size += len;
		ret = send_cmd(sctx);
		if (ret < 0)
			break;
		offset += len;
	}
	/*
	 * Record how far the writes got (updated even when the loop breaks on
	 * error) as the next expected write offset for this inode.
	 */
	sctx->cur_inode_next_write_offset = offset;
tlv_put_failure:
	fs_path_free(p);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) static int send_extent_data(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) 			    const u64 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) 			    const u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) 	u64 read_size = max_send_read_size(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) 	u64 sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) 	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) 		return send_update_extent(sctx, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) 	while (sent < len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) 		u64 size = min(len - sent, read_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) 		ret = send_write(sctx, offset + sent, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) 		sent += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256)  * Search for a capability xattr related to sctx->cur_ino. If the capability is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257)  * found, call send_set_xattr function to emit it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259)  * Return 0 if there isn't a capability, or when the capability was emitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260)  * successfully, or < 0 if an error occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) static int send_capabilities(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) 	struct fs_path *fspath = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) 	struct btrfs_dir_item *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) 	struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) 	unsigned long data_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) 	char *buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) 	int buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) 	di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) 				XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) 	if (!di) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) 		/* There is no xattr for this inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) 	} else if (IS_ERR(di)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) 		ret = PTR_ERR(di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) 	leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) 	buf_len = btrfs_dir_data_len(leaf, di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) 	fspath = fs_path_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) 	buf = kmalloc(buf_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) 	if (!fspath || !buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) 	data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) 	read_extent_buffer(leaf, buf, data_ptr, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) 	ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) 			strlen(XATTR_NAME_CAPS), buf, buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) 	fs_path_free(fspath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) static int clone_range(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) 		       struct clone_root *clone_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) 		       const u64 disk_byte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) 		       u64 data_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) 		       u64 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) 		       u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) 	u64 clone_src_i_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) 	 * Prevent cloning from a zero offset with a length matching the sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) 	 * size because in some scenarios this will make the receiver fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) 	 * For example, if in the source filesystem the extent at offset 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) 	 * has a length of sectorsize and it was written using direct IO, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) 	 * it can never be an inline extent (even if compression is enabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) 	 * Then this extent can be cloned in the original filesystem to a non
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) 	 * zero file offset, but it may not be possible to clone in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) 	 * destination filesystem because it can be inlined due to compression
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) 	 * on the destination filesystem (as the receiver's write operations are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) 	 * always done using buffered IO). The same happens when the original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) 	 * filesystem does not have compression enabled but the destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) 	 * filesystem has.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) 	if (clone_root->offset == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) 	    len == sctx->send_root->fs_info->sectorsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) 		return send_extent_data(sctx, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) 	 * There are inodes that have extents that lie behind its i_size. Don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) 	 * accept clones from these extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) 	ret = __get_inode_info(clone_root->root, path, clone_root->ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) 			       &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) 	btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) 	 * We can't send a clone operation for the entire range if we find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) 	 * extent items in the respective range in the source file that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) 	 * refer to different extents or if we find holes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) 	 * So check for that and do a mix of clone and regular write/copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) 	 * operations if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) 	 * Example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) 	 * mkfs.btrfs -f /dev/sda
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) 	 * mount /dev/sda /mnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) 	 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) 	 * cp --reflink=always /mnt/foo /mnt/bar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) 	 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) 	 * btrfs subvolume snapshot -r /mnt /mnt/snap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) 	 * If when we send the snapshot and we are processing file bar (which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) 	 * has a higher inode number than foo) we blindly send a clone operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) 	 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) 	 * a file bar that matches the content of file foo - iow, doesn't match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) 	 * the content from bar in the original filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) 	key.objectid = clone_root->ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) 	key.type = BTRFS_EXTENT_DATA_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) 	key.offset = clone_root->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) 	ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) 	if (ret > 0 && path->slots[0] > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) 		if (key.objectid == clone_root->ino &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) 		    key.type == BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) 			path->slots[0]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) 	while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) 		struct extent_buffer *leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) 		int slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) 		struct btrfs_file_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) 		u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) 		u64 ext_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) 		u64 clone_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) 		u64 clone_data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) 		if (slot >= btrfs_header_nritems(leaf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) 			ret = btrfs_next_leaf(clone_root->root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) 			else if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) 		btrfs_item_key_to_cpu(leaf, &key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) 		 * We might have an implicit trailing hole (NO_HOLES feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) 		 * enabled). We deal with it after leaving this loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) 		if (key.objectid != clone_root->ino ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) 		    key.type != BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) 		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) 		type = btrfs_file_extent_type(leaf, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) 		if (type == BTRFS_FILE_EXTENT_INLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) 			ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) 			ext_len = PAGE_ALIGN(ext_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) 			ext_len = btrfs_file_extent_num_bytes(leaf, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) 		if (key.offset + ext_len <= clone_root->offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) 			goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) 		if (key.offset > clone_root->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) 			/* Implicit hole, NO_HOLES feature enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) 			u64 hole_len = key.offset - clone_root->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) 			if (hole_len > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) 				hole_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) 			ret = send_extent_data(sctx, offset, hole_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) 			len -= hole_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) 			if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) 			offset += hole_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) 			clone_root->offset += hole_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) 			data_offset += hole_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) 		if (key.offset >= clone_root->offset + len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) 		if (key.offset >= clone_src_i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) 		if (key.offset + ext_len > clone_src_i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) 			ext_len = clone_src_i_size - key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) 		clone_data_offset = btrfs_file_extent_offset(leaf, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) 		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) 			clone_root->offset = key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) 			if (clone_data_offset < data_offset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) 				clone_data_offset + ext_len > data_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) 				u64 extent_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) 				extent_offset = data_offset - clone_data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) 				ext_len -= extent_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) 				clone_data_offset += extent_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) 				clone_root->offset += extent_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) 		clone_len = min_t(u64, ext_len, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) 		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) 		    clone_data_offset == data_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) 			const u64 src_end = clone_root->offset + clone_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) 			const u64 sectorsize = SZ_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) 			 * We can't clone the last block, when its size is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) 			 * sector size aligned, into the middle of a file. If we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) 			 * do so, the receiver will get a failure (-EINVAL) when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) 			 * trying to clone or will silently corrupt the data in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) 			 * the destination file if it's on a kernel without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) 			 * fix introduced by commit ac765f83f1397646
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) 			 * ("Btrfs: fix data corruption due to cloning of eof
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) 			 * block).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) 			 * So issue a clone of the aligned down range plus a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) 			 * regular write for the eof block, if we hit that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) 			 * Also, we use the maximum possible sector size, 64K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) 			 * because we don't know what's the sector size of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) 			 * filesystem that receives the stream, so we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) 			 * assume the largest possible sector size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) 			if (src_end == clone_src_i_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) 			    !IS_ALIGNED(src_end, sectorsize) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) 			    offset + clone_len < sctx->cur_inode_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) 				u64 slen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) 				slen = ALIGN_DOWN(src_end - clone_root->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) 						  sectorsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) 				if (slen > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) 					ret = send_clone(sctx, offset, slen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) 							 clone_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) 					if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) 						goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) 				ret = send_extent_data(sctx, offset + slen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) 						       clone_len - slen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) 				ret = send_clone(sctx, offset, clone_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) 						 clone_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) 			ret = send_extent_data(sctx, offset, clone_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) 		len -= clone_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) 		if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) 		offset += clone_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) 		clone_root->offset += clone_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) 		 * If we are cloning from the file we are currently processing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) 		 * and using the send root as the clone root, we must stop once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) 		 * the current clone offset reaches the current eof of the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) 		 * at the receiver, otherwise we would issue an invalid clone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) 		 * operation (source range going beyond eof) and cause the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) 		 * receiver to fail. So if we reach the current eof, bail out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) 		 * and fallback to a regular write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) 		if (clone_root->root == sctx->send_root &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) 		    clone_root->ino == sctx->cur_ino &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) 		    clone_root->offset >= sctx->cur_inode_next_write_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) 		data_offset += clone_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) 		path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) 	if (len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) 		ret = send_extent_data(sctx, offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) static int send_write_or_clone(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) 			       struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) 			       struct btrfs_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) 			       struct clone_root *clone_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) 	u64 offset = key->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) 	u64 end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) 	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) 	end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) 	if (offset >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) 	if (clone_root && IS_ALIGNED(end, bs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) 		struct btrfs_file_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) 		u64 disk_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) 		u64 data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) 		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) 				    struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) 		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) 		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) 		ret = clone_range(sctx, clone_root, disk_byte, data_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) 				  offset, end - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) 		ret = send_extent_data(sctx, offset, end - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) 	sctx->cur_inode_next_write_offset = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) static int is_extent_unchanged(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) 			       struct btrfs_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) 			       struct btrfs_key *ekey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) 	struct btrfs_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) 	struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) 	int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) 	struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) 	struct btrfs_file_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) 	u64 left_disknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) 	u64 right_disknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) 	u64 left_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) 	u64 right_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) 	u64 left_offset_fixed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) 	u64 left_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) 	u64 right_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) 	u64 left_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) 	u64 right_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) 	u8 left_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) 	u8 right_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) 	eb = left_path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) 	slot = left_path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) 	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) 	left_type = btrfs_file_extent_type(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) 	if (left_type != BTRFS_FILE_EXTENT_REG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) 	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) 	left_len = btrfs_file_extent_num_bytes(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) 	left_offset = btrfs_file_extent_offset(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) 	left_gen = btrfs_file_extent_generation(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) 	 * Following comments will refer to these graphics. L is the left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) 	 * extents which we are checking at the moment. 1-8 are the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) 	 * extents that we iterate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) 	 *       |-----L-----|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) 	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) 	 *       |-----L-----|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) 	 * |--1--|-2b-|...(same as above)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) 	 * Alternative situation. Happens on files where extents got split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) 	 *       |-----L-----|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) 	 * |-----------7-----------|-6-|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) 	 * Alternative situation. Happens on files which got larger.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) 	 *       |-----L-----|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) 	 * |-8-|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) 	 * Nothing follows after 8.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) 	key.objectid = ekey->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) 	key.type = BTRFS_EXTENT_DATA_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) 	key.offset = ekey->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) 	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) 	 * Handle special case where the right side has no extents at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) 	eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) 	slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) 	btrfs_item_key_to_cpu(eb, &found_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) 	if (found_key.objectid != key.objectid ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) 	    found_key.type != key.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) 		/* If we're a hole then just pretend nothing changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) 		ret = (left_disknr) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) 	 * We're now on 2a, 2b or 7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) 	key = found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) 	while (key.offset < ekey->offset + left_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) 		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) 		right_type = btrfs_file_extent_type(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) 		if (right_type != BTRFS_FILE_EXTENT_REG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) 		    right_type != BTRFS_FILE_EXTENT_INLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) 		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) 			right_len = btrfs_file_extent_ram_bytes(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) 			right_len = PAGE_ALIGN(right_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) 			right_len = btrfs_file_extent_num_bytes(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) 		 * Are we at extent 8? If yes, we know the extent is changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) 		 * This may only happen on the first iteration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) 		if (found_key.offset + right_len <= ekey->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) 			/* If we're a hole just pretend nothing changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) 			ret = (left_disknr) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) 		 * We just wanted to see if when we have an inline extent, what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) 		 * follows it is a regular extent (wanted to check the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) 		 * condition for inline extents too). This should normally not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) 		 * happen but it's possible for example when we have an inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) 		 * compressed extent representing data with a size matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) 		 * the page size (currently the same as sector size).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) 		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) 		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) 		right_offset = btrfs_file_extent_offset(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) 		right_gen = btrfs_file_extent_generation(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) 		left_offset_fixed = left_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) 		if (key.offset < ekey->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) 			/* Fix the right offset for 2a and 7. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) 			right_offset += ekey->offset - key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) 			/* Fix the left offset for all behind 2a and 2b */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) 			left_offset_fixed += key.offset - ekey->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) 		 * Check if we have the same extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) 		if (left_disknr != right_disknr ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) 		    left_offset_fixed != right_offset ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) 		    left_gen != right_gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) 		 * Go to the next extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) 		ret = btrfs_next_item(sctx->parent_root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) 		if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) 			eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) 			slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) 			btrfs_item_key_to_cpu(eb, &found_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) 		if (ret || found_key.objectid != key.objectid ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) 		    found_key.type != key.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) 			key.offset += right_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) 		if (found_key.offset != key.offset + right_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) 		key = found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) 	 * We're now behind the left extent (treat as unchanged) or at the end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) 	 * of the right side (treat as changed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) 	if (key.offset >= ekey->offset + left_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) static int get_last_extent(struct send_ctx *sctx, u64 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) 	struct btrfs_root *root = sctx->send_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) 	sctx->cur_inode_last_extent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) 	key.objectid = sctx->cur_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) 	key.type = BTRFS_EXTENT_DATA_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) 	key.offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) 	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) 	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) 	sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) static int range_is_hole_in_parent(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) 				   const u64 start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) 				   const u64 end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) 	struct btrfs_root *root = sctx->parent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) 	u64 search_start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) 	key.objectid = sctx->cur_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) 	key.type = BTRFS_EXTENT_DATA_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) 	key.offset = search_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) 	if (ret > 0 && path->slots[0] > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) 		path->slots[0]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) 	while (search_start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) 		struct extent_buffer *leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) 		int slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) 		struct btrfs_file_extent_item *fi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) 		u64 extent_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) 		if (slot >= btrfs_header_nritems(leaf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) 			ret = btrfs_next_leaf(root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) 			else if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) 		btrfs_item_key_to_cpu(leaf, &key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) 		if (key.objectid < sctx->cur_ino ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) 		    key.type < BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) 			goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) 		if (key.objectid > sctx->cur_ino ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) 		    key.type > BTRFS_EXTENT_DATA_KEY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) 		    key.offset >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) 		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) 		extent_end = btrfs_file_extent_end(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) 		if (extent_end <= start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) 			goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) 		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) 			search_start = extent_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) 			goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) 		path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) 	ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) 			   struct btrfs_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) 	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) 	if (sctx->cur_inode_last_extent == (u64)-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) 		ret = get_last_extent(sctx, key->offset - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) 	if (path->slots[0] == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) 	    sctx->cur_inode_last_extent < key->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) 		 * We might have skipped entire leafs that contained only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) 		 * file extent items for our current inode. These leafs have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) 		 * a generation number smaller (older) than the one in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) 		 * current leaf and the leaf our last extent came from, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) 		 * are located between these 2 leafs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) 		ret = get_last_extent(sctx, key->offset - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) 	if (sctx->cur_inode_last_extent < key->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) 		ret = range_is_hole_in_parent(sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) 					      sctx->cur_inode_last_extent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) 					      key->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) 		else if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) 			ret = send_hole(sctx, key->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) 	sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) static int process_extent(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) 			  struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) 			  struct btrfs_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) 	struct clone_root *found_clone = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) 	if (S_ISLNK(sctx->cur_inode_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) 	if (sctx->parent_root && !sctx->cur_inode_new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) 		ret = is_extent_unchanged(sctx, path, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) 			goto out_hole;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) 		struct btrfs_file_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) 		u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) 		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) 				    struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) 		type = btrfs_file_extent_type(path->nodes[0], ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) 		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) 		    type == BTRFS_FILE_EXTENT_REG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) 			 * The send spec does not have a prealloc command yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) 			 * so just leave a hole for prealloc'ed extents until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) 			 * we have enough commands queued up to justify rev'ing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) 			 * the send spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) 			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) 			/* Have a hole, just skip it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) 			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) 	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) 			sctx->cur_inode_size, &found_clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) 	if (ret != -ENOENT && ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) 	ret = send_write_or_clone(sctx, path, key, found_clone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) out_hole:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) 	ret = maybe_send_hole(sctx, path, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) static int process_all_extents(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) 	struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) 	struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) 	struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) 	int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) 	root = sctx->send_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) 	key.objectid = sctx->cmp_key->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) 	key.type = BTRFS_EXTENT_DATA_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) 		eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) 		slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) 		if (slot >= btrfs_header_nritems(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) 			ret = btrfs_next_leaf(root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) 			} else if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) 		btrfs_item_key_to_cpu(eb, &found_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) 		if (found_key.objectid != key.objectid ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) 		    found_key.type != key.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) 		ret = process_extent(sctx, path, &found_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) 		path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) 					   int *pending_move,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) 					   int *refs_processed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) 	if (sctx->cur_ino == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) 	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) 	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) 	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) 	ret = process_recorded_refs(sctx, pending_move);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) 	*refs_processed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) 	u64 left_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) 	u64 left_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) 	u64 left_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) 	u64 right_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) 	u64 right_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) 	u64 right_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) 	int need_chmod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) 	int need_chown = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) 	int need_truncate = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) 	int pending_move = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) 	int refs_processed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) 	if (sctx->ignore_cur_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) 	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) 					      &refs_processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) 	 * We have processed the refs and thus need to advance send_progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) 	 * Now, calls to get_cur_xxx will take the updated refs of the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) 	 * inode into account.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) 	 * On the other hand, if our current inode is a directory and couldn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) 	 * be moved/renamed because its parent was renamed/moved too and it has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) 	 * a higher inode number, we can only move/rename our current inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) 	 * after we moved/renamed its parent. Therefore in this case operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) 	 * the old path (pre move/rename) of our current inode, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) 	 * move/rename will be performed later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) 	if (refs_processed && !pending_move)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) 		sctx->send_progress = sctx->cur_ino + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) 	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) 	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) 	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) 			&left_mode, &left_uid, &left_gid, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) 	if (!sctx->parent_root || sctx->cur_inode_new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) 		need_chown = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) 		if (!S_ISLNK(sctx->cur_inode_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) 			need_chmod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) 		if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) 			need_truncate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) 		u64 old_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) 		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) 				&old_size, NULL, &right_mode, &right_uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) 				&right_gid, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) 		if (left_uid != right_uid || left_gid != right_gid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) 			need_chown = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) 		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) 			need_chmod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) 		if ((old_size == sctx->cur_inode_size) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) 		    (sctx->cur_inode_size > old_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) 		     sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) 			need_truncate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) 	if (S_ISREG(sctx->cur_inode_mode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) 		if (need_send_hole(sctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) 			if (sctx->cur_inode_last_extent == (u64)-1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) 			    sctx->cur_inode_last_extent <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) 			    sctx->cur_inode_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) 				ret = get_last_extent(sctx, (u64)-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) 			if (sctx->cur_inode_last_extent <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) 			    sctx->cur_inode_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) 				ret = send_hole(sctx, sctx->cur_inode_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) 		if (need_truncate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) 			ret = send_truncate(sctx, sctx->cur_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) 					    sctx->cur_inode_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) 					    sctx->cur_inode_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) 	if (need_chown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) 		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) 				left_uid, left_gid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) 	if (need_chmod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) 		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) 				left_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) 	ret = send_capabilities(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) 	 * If other directory inodes depended on our current directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177) 	 * inode's move/rename, now do their move/rename operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) 	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) 		ret = apply_children_dir_moves(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) 		 * Need to send that every time, no matter if it actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) 		 * changed between the two trees as we have done changes to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) 		 * the inode before. If our inode is a directory and it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) 		 * waiting to be moved/renamed, we will send its utimes when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) 		 * it's moved/renamed, therefore we don't need to do it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) 		sctx->send_progress = sctx->cur_ino + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) 		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200) struct parent_paths_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) 	struct list_head *refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) 	struct send_ctx *sctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) 			     void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) 	struct parent_paths_ctx *ppctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) 	return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) 			  ppctx->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215)  * Issue unlink operations for all paths of the current inode found in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216)  * parent snapshot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) static int btrfs_unlink_all_paths(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) 	LIST_HEAD(deleted_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) 	struct parent_paths_ctx ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) 	key.objectid = sctx->cur_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) 	key.type = BTRFS_INODE_REF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) 	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) 	ctx.refs = &deleted_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) 	ctx.sctx = sctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) 	while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) 		struct extent_buffer *eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) 		int slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) 		if (slot >= btrfs_header_nritems(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) 			ret = btrfs_next_leaf(sctx->parent_root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) 			else if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) 		btrfs_item_key_to_cpu(eb, &key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) 		if (key.objectid != sctx->cur_ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) 		if (key.type != BTRFS_INODE_REF_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) 		    key.type != BTRFS_INODE_EXTREF_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) 		ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) 					record_parent_ref, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) 		path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) 	while (!list_empty(&deleted_refs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) 		struct recorded_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) 		ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) 		ret = send_unlink(sctx, ref->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) 		fs_path_free(ref->full_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) 		list_del(&ref->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) 		kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) 		__free_recorded_refs(&deleted_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) static int changed_inode(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) 			 enum btrfs_compare_tree_result result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) 	struct btrfs_key *key = sctx->cmp_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) 	struct btrfs_inode_item *left_ii = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) 	struct btrfs_inode_item *right_ii = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) 	u64 left_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) 	u64 right_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) 	sctx->cur_ino = key->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) 	sctx->cur_inode_new_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) 	sctx->cur_inode_last_extent = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300) 	sctx->cur_inode_next_write_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) 	sctx->ignore_cur_inode = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) 	 * Set send_progress to current inode. This will tell all get_cur_xxx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) 	 * functions that the current inode's refs are not updated yet. Later,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) 	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) 	sctx->send_progress = sctx->cur_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) 	if (result == BTRFS_COMPARE_TREE_NEW ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) 	    result == BTRFS_COMPARE_TREE_CHANGED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) 		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) 				sctx->left_path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) 				struct btrfs_inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) 		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) 				left_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) 		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) 				sctx->right_path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) 				struct btrfs_inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) 		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) 				right_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) 	if (result == BTRFS_COMPARE_TREE_CHANGED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) 		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) 				sctx->right_path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327) 				struct btrfs_inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) 		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) 				right_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) 		 * The cur_ino = root dir case is special here. We can't treat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) 		 * the inode as deleted+reused because it would generate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) 		 * stream that tries to delete/mkdir the root dir.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) 		if (left_gen != right_gen &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) 		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) 			sctx->cur_inode_new_gen = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) 	 * Normally we do not find inodes with a link count of zero (orphans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) 	 * because the most common case is to create a snapshot and use it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) 	 * for a send operation. However other less common use cases involve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) 	 * using a subvolume and send it after turning it to RO mode just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) 	 * after deleting all hard links of a file while holding an open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) 	 * file descriptor against it or turning a RO snapshot into RW mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) 	 * keep an open file descriptor against a file, delete it and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) 	 * turn the snapshot back to RO mode before using it for a send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) 	 * operation. So if we find such cases, ignore the inode and all its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) 	 * items completely if it's a new inode, or if it's a changed inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) 	 * make sure all its previous paths (from the parent snapshot) are all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) 	 * unlinked and all other the inode items are ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) 	if (result == BTRFS_COMPARE_TREE_NEW ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) 	    result == BTRFS_COMPARE_TREE_CHANGED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) 		u32 nlinks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) 		nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) 		if (nlinks == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) 			sctx->ignore_cur_inode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) 			if (result == BTRFS_COMPARE_TREE_CHANGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) 				ret = btrfs_unlink_all_paths(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) 	if (result == BTRFS_COMPARE_TREE_NEW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) 		sctx->cur_inode_gen = left_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) 		sctx->cur_inode_new = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372) 		sctx->cur_inode_deleted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) 		sctx->cur_inode_size = btrfs_inode_size(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) 				sctx->left_path->nodes[0], left_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) 		sctx->cur_inode_mode = btrfs_inode_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) 				sctx->left_path->nodes[0], left_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) 		sctx->cur_inode_rdev = btrfs_inode_rdev(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) 				sctx->left_path->nodes[0], left_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) 		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) 			ret = send_create_inode_if_needed(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) 	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) 		sctx->cur_inode_gen = right_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) 		sctx->cur_inode_new = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) 		sctx->cur_inode_deleted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) 		sctx->cur_inode_size = btrfs_inode_size(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) 				sctx->right_path->nodes[0], right_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) 		sctx->cur_inode_mode = btrfs_inode_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) 				sctx->right_path->nodes[0], right_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) 	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) 		 * We need to do some special handling in case the inode was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) 		 * reported as changed with a changed generation number. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) 		 * means that the original inode was deleted and new inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) 		 * reused the same inum. So we have to treat the old inode as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) 		 * deleted and the new one as new.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) 		if (sctx->cur_inode_new_gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) 			 * First, process the inode as if it was deleted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) 			sctx->cur_inode_gen = right_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) 			sctx->cur_inode_new = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) 			sctx->cur_inode_deleted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) 			sctx->cur_inode_size = btrfs_inode_size(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) 					sctx->right_path->nodes[0], right_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) 			sctx->cur_inode_mode = btrfs_inode_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) 					sctx->right_path->nodes[0], right_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) 			ret = process_all_refs(sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) 					BTRFS_COMPARE_TREE_DELETED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) 			 * Now process the inode as if it was new.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) 			sctx->cur_inode_gen = left_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) 			sctx->cur_inode_new = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) 			sctx->cur_inode_deleted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) 			sctx->cur_inode_size = btrfs_inode_size(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) 					sctx->left_path->nodes[0], left_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) 			sctx->cur_inode_mode = btrfs_inode_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) 					sctx->left_path->nodes[0], left_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) 			sctx->cur_inode_rdev = btrfs_inode_rdev(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) 					sctx->left_path->nodes[0], left_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) 			ret = send_create_inode_if_needed(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) 			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) 			 * Advance send_progress now as we did not get into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) 			 * process_recorded_refs_if_needed in the new_gen case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) 			sctx->send_progress = sctx->cur_ino + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) 			 * Now process all extents and xattrs of the inode as if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) 			 * they were all new.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) 			ret = process_all_extents(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) 			ret = process_all_new_xattrs(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) 			sctx->cur_inode_gen = left_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) 			sctx->cur_inode_new = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) 			sctx->cur_inode_new_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) 			sctx->cur_inode_deleted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) 			sctx->cur_inode_size = btrfs_inode_size(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) 					sctx->left_path->nodes[0], left_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) 			sctx->cur_inode_mode = btrfs_inode_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456) 					sctx->left_path->nodes[0], left_ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465)  * We have to process new refs before deleted refs, but compare_trees gives us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466)  * the new and deleted refs mixed. To fix this, we record the new/deleted refs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467)  * first and later process them in process_recorded_refs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468)  * For the cur_inode_new_gen case, we skip recording completely because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469)  * changed_inode did already initiate processing of refs. The reason for this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470)  * that in this case, compare_tree actually compares the refs of 2 different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471)  * inodes. To fix this, process_all_refs is used in changed_inode to handle all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472)  * refs of the right tree as deleted and all refs of the left tree as new.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) static int changed_ref(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) 		       enum btrfs_compare_tree_result result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) 	if (sctx->cur_ino != sctx->cmp_key->objectid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) 		inconsistent_snapshot_error(sctx, result, "reference");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) 	if (!sctx->cur_inode_new_gen &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) 	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) 		if (result == BTRFS_COMPARE_TREE_NEW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) 			ret = record_new_ref(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) 		else if (result == BTRFS_COMPARE_TREE_DELETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) 			ret = record_deleted_ref(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) 		else if (result == BTRFS_COMPARE_TREE_CHANGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) 			ret = record_changed_ref(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498)  * Process new/deleted/changed xattrs. We skip processing in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499)  * cur_inode_new_gen case because changed_inode did already initiate processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500)  * of xattrs. The reason is the same as in changed_ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) static int changed_xattr(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) 			 enum btrfs_compare_tree_result result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) 	if (sctx->cur_ino != sctx->cmp_key->objectid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) 		inconsistent_snapshot_error(sctx, result, "xattr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) 	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) 		if (result == BTRFS_COMPARE_TREE_NEW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) 			ret = process_new_xattr(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) 		else if (result == BTRFS_COMPARE_TREE_DELETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) 			ret = process_deleted_xattr(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) 		else if (result == BTRFS_COMPARE_TREE_CHANGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) 			ret = process_changed_xattr(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525)  * Process new/deleted/changed extents. We skip processing in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526)  * cur_inode_new_gen case because changed_inode did already initiate processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527)  * of extents. The reason is the same as in changed_ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) static int changed_extent(struct send_ctx *sctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) 			  enum btrfs_compare_tree_result result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) 	 * We have found an extent item that changed without the inode item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) 	 * having changed. This can happen either after relocation (where the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) 	 * disk_bytenr of an extent item is replaced at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) 	 * relocation.c:replace_file_extents()) or after deduplication into a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) 	 * file in both the parent and send snapshots (where an extent item can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540) 	 * get modified or replaced with a new one). Note that deduplication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) 	 * updates the inode item, but it only changes the iversion (sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) 	 * field in the inode item) of the inode, so if a file is deduplicated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) 	 * the same amount of times in both the parent and send snapshots, its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) 	 * iversion becames the same in both snapshots, whence the inode item is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) 	 * the same on both snapshots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) 	if (sctx->cur_ino != sctx->cmp_key->objectid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) 	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) 		if (result != BTRFS_COMPARE_TREE_DELETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) 			ret = process_extent(sctx, sctx->left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) 					sctx->cmp_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) static int dir_changed(struct send_ctx *sctx, u64 dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) 	u64 orig_gen, new_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) 	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) 			     NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) 	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) 			     NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) 	return (orig_gen != new_gen) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) 			struct btrfs_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) 	struct btrfs_inode_extref *extref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) 	struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) 	u64 dirid = 0, last_dirid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583) 	unsigned long ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) 	u32 item_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585) 	u32 cur_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) 	int ref_name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) 	/* Easy case, just check this one dirid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) 	if (key->type == BTRFS_INODE_REF_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) 		dirid = key->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) 		ret = dir_changed(sctx, dirid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) 	leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) 	while (cur_offset < item_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) 		extref = (struct btrfs_inode_extref *)(ptr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) 						       cur_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) 		dirid = btrfs_inode_extref_parent(leaf, extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) 		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) 		cur_offset += ref_name_len + sizeof(*extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) 		if (dirid == last_dirid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) 		ret = dir_changed(sctx, dirid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) 		last_dirid = dirid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618)  * Updates compare related fields in sctx and simply forwards to the actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619)  * changed_xxx functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621) static int changed_cb(struct btrfs_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) 		      struct btrfs_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) 		      struct btrfs_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) 		      enum btrfs_compare_tree_result result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) 		      void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) 	struct send_ctx *sctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) 	if (result == BTRFS_COMPARE_TREE_SAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) 		if (key->type == BTRFS_INODE_REF_KEY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) 		    key->type == BTRFS_INODE_EXTREF_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) 			ret = compare_refs(sctx, left_path, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) 			if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) 				return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) 		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) 			return maybe_send_hole(sctx, left_path, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) 		result = BTRFS_COMPARE_TREE_CHANGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) 	sctx->left_path = left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) 	sctx->right_path = right_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) 	sctx->cmp_key = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) 	ret = finish_inode_if_needed(sctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) 	/* Ignore non-FS objects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) 	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) 	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) 	if (key->type == BTRFS_INODE_ITEM_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) 		ret = changed_inode(sctx, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662) 	} else if (!sctx->ignore_cur_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) 		if (key->type == BTRFS_INODE_REF_KEY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) 		    key->type == BTRFS_INODE_EXTREF_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665) 			ret = changed_ref(sctx, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) 		else if (key->type == BTRFS_XATTR_ITEM_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667) 			ret = changed_xattr(sctx, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) 		else if (key->type == BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) 			ret = changed_extent(sctx, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) static int full_send_tree(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) 	struct btrfs_root *send_root = sctx->send_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) 	struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) 	struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) 	struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683) 	int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) 	path = alloc_path_for_send();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) 	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690) 	key.type = BTRFS_INODE_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) 	key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) 	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) 		goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) 		eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) 		slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) 		btrfs_item_key_to_cpu(eb, &key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) 		ret = changed_cb(path, NULL, &key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) 				 BTRFS_COMPARE_TREE_NEW, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) 		ret = btrfs_next_item(send_root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) 			ret  = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) out_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) 	ret = finish_inode_if_needed(sctx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) 	btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) static int tree_move_down(struct btrfs_path *path, int *level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) 	struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) 	BUG_ON(*level == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731) 	eb = btrfs_read_node_slot(path->nodes[*level], path->slots[*level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) 	if (IS_ERR(eb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) 		return PTR_ERR(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) 	path->nodes[*level - 1] = eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) 	path->slots[*level - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737) 	(*level)--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) static int tree_move_next_or_upnext(struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) 				    int *level, int root_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745) 	int nritems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) 	nritems = btrfs_header_nritems(path->nodes[*level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) 	path->slots[*level]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) 	while (path->slots[*level] >= nritems) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) 		if (*level == root_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) 		/* move upnext */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) 		path->slots[*level] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756) 		free_extent_buffer(path->nodes[*level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) 		path->nodes[*level] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) 		(*level)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) 		path->slots[*level]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) 		nritems = btrfs_header_nritems(path->nodes[*level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768)  * Returns 1 if it had to move up and next. 0 is returned if it moved only next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769)  * or down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) static int tree_advance(struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) 			int *level, int root_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773) 			int allow_down,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) 			struct btrfs_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) 	if (*level == 0 || !allow_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) 		ret = tree_move_next_or_upnext(path, level, root_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) 		ret = tree_move_down(path, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) 	if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784) 		if (*level == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) 			btrfs_item_key_to_cpu(path->nodes[*level], key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) 					path->slots[*level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) 			btrfs_node_key_to_cpu(path->nodes[*level], key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) 					path->slots[*level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) static int tree_compare_item(struct btrfs_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) 			     struct btrfs_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) 			     char *tmp_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) 	int cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799) 	int len1, len2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) 	unsigned long off1, off2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802) 	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803) 	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) 	if (len1 != len2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) 	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808) 	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) 				right_path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) 	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) 	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814) 	if (cmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820)  * This function compares two trees and calls the provided callback for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821)  * every changed/new/deleted item it finds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822)  * If shared tree blocks are encountered, whole subtrees are skipped, making
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823)  * the compare pretty fast on snapshotted subvolumes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825)  * This currently works on commit roots only. As commit roots are read only,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826)  * we don't do any locking. The commit roots are protected with transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827)  * Transactions are ended and rejoined when a commit is tried in between.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829)  * This function checks for modifications done to the trees while comparing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830)  * If it detects a change, it aborts immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832) static int btrfs_compare_trees(struct btrfs_root *left_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) 			struct btrfs_root *right_root, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) 	struct btrfs_fs_info *fs_info = left_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837) 	int cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) 	struct btrfs_path *left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) 	struct btrfs_path *right_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) 	struct btrfs_key left_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841) 	struct btrfs_key right_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) 	char *tmp_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) 	int left_root_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) 	int right_root_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) 	int left_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) 	int right_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) 	int left_end_reached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) 	int right_end_reached;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) 	int advance_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) 	int advance_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) 	u64 left_blockptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) 	u64 right_blockptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) 	u64 left_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) 	u64 right_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) 	left_path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) 	if (!left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) 	right_path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862) 	if (!right_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) 	tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) 	if (!tmp_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) 	left_path->search_commit_root = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874) 	left_path->skip_locking = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) 	right_path->search_commit_root = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) 	right_path->skip_locking = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879) 	 * Strategy: Go to the first items of both trees. Then do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) 	 * If both trees are at level 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) 	 *   Compare keys of current items
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) 	 *     If left < right treat left item as new, advance left tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) 	 *       and repeat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885) 	 *     If left > right treat right item as deleted, advance right tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886) 	 *       and repeat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) 	 *     If left == right do deep compare of items, treat as changed if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) 	 *       needed, advance both trees and repeat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889) 	 * If both trees are at the same level but not at level 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890) 	 *   Compare keys of current nodes/leafs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) 	 *     If left < right advance left tree and repeat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) 	 *     If left > right advance right tree and repeat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893) 	 *     If left == right compare blockptrs of the next nodes/leafs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) 	 *       If they match advance both trees but stay at the same level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) 	 *         and repeat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) 	 *       If they don't match advance both trees while allowing to go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897) 	 *         deeper and repeat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) 	 * If tree levels are different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) 	 *   Advance the tree that needs it and repeat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) 	 * Advancing a tree means:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) 	 *   If we are at level 0, try to go to the next slot. If that's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) 	 *   possible, go one level up and repeat. Stop when we found a level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) 	 *   where we could go to the next slot. We may at this point be on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905) 	 *   node or a leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) 	 *   If we are not at level 0 and not on shared tree blocks, go one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) 	 *   level deeper.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910) 	 *   If we are not at level 0 and on shared tree blocks, go one slot to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) 	 *   the right if possible or go up and right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) 	down_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915) 	left_level = btrfs_header_level(left_root->commit_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) 	left_root_level = left_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917) 	left_path->nodes[left_level] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) 			btrfs_clone_extent_buffer(left_root->commit_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919) 	if (!left_path->nodes[left_level]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920) 		up_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) 	right_level = btrfs_header_level(right_root->commit_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) 	right_root_level = right_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) 	right_path->nodes[right_level] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) 			btrfs_clone_extent_buffer(right_root->commit_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) 	if (!right_path->nodes[right_level]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) 		up_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) 	up_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) 	if (left_level == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) 				&left_key, left_path->slots[left_level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) 		btrfs_node_key_to_cpu(left_path->nodes[left_level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) 				&left_key, left_path->slots[left_level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942) 	if (right_level == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) 		btrfs_item_key_to_cpu(right_path->nodes[right_level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) 				&right_key, right_path->slots[right_level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) 		btrfs_node_key_to_cpu(right_path->nodes[right_level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) 				&right_key, right_path->slots[right_level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) 	left_end_reached = right_end_reached = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950) 	advance_left = advance_right = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) 		if (advance_left && !left_end_reached) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955) 			ret = tree_advance(left_path, &left_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) 					left_root_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957) 					advance_left != ADVANCE_ONLY_NEXT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) 					&left_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) 			if (ret == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) 				left_end_reached = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) 			else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) 			advance_left = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) 		if (advance_right && !right_end_reached) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) 			ret = tree_advance(right_path, &right_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) 					right_root_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968) 					advance_right != ADVANCE_ONLY_NEXT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) 					&right_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) 			if (ret == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) 				right_end_reached = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972) 			else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) 			advance_right = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977) 		if (left_end_reached && right_end_reached) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) 		} else if (left_end_reached) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) 			if (right_level == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982) 				ret = changed_cb(left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) 						&right_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) 						BTRFS_COMPARE_TREE_DELETED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985) 						ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) 			advance_right = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) 		} else if (right_end_reached) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) 			if (left_level == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) 				ret = changed_cb(left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) 						&left_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) 						BTRFS_COMPARE_TREE_NEW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996) 						ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000) 			advance_left = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) 		if (left_level == 0 && right_level == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) 			if (cmp < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) 				ret = changed_cb(left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) 						&left_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) 						BTRFS_COMPARE_TREE_NEW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) 						ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013) 				advance_left = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014) 			} else if (cmp > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) 				ret = changed_cb(left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016) 						&right_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) 						BTRFS_COMPARE_TREE_DELETED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) 						ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) 				advance_right = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) 				enum btrfs_compare_tree_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026) 				ret = tree_compare_item(left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) 							tmp_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) 					result = BTRFS_COMPARE_TREE_CHANGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031) 					result = BTRFS_COMPARE_TREE_SAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032) 				ret = changed_cb(left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) 						 &left_key, result, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034) 				if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) 				advance_left = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) 				advance_right = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) 		} else if (left_level == right_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041) 			if (cmp < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) 				advance_left = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) 			} else if (cmp > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) 				advance_right = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) 				left_blockptr = btrfs_node_blockptr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047) 						left_path->nodes[left_level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) 						left_path->slots[left_level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) 				right_blockptr = btrfs_node_blockptr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050) 						right_path->nodes[right_level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) 						right_path->slots[right_level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) 				left_gen = btrfs_node_ptr_generation(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) 						left_path->nodes[left_level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054) 						left_path->slots[left_level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055) 				right_gen = btrfs_node_ptr_generation(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056) 						right_path->nodes[right_level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) 						right_path->slots[right_level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) 				if (left_blockptr == right_blockptr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) 				    left_gen == right_gen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) 					/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) 					 * As we're on a shared block, don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) 					 * allow to go deeper.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) 					 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) 					advance_left = ADVANCE_ONLY_NEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) 					advance_right = ADVANCE_ONLY_NEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) 					advance_left = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) 					advance_right = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) 		} else if (left_level < right_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) 			advance_right = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) 			advance_left = ADVANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) 	btrfs_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080) 	btrfs_free_path(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) 	kvfree(tmp_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) static int send_subvol(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) 	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090) 		ret = send_header(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) 	ret = send_subvol_begin(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099) 	if (sctx->parent_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) 		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) 		ret = finish_inode_if_needed(sctx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) 		ret = full_send_tree(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113) 	free_recorded_refs(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118)  * If orphan cleanup did remove any orphans from a root, it means the tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119)  * was modified and therefore the commit root is not the same as the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120)  * root anymore. This is a problem, because send uses the commit root and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121)  * therefore can see inode items that don't exist in the current root anymore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122)  * and for example make calls to btrfs_iget, which will do tree lookups based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123)  * on the current root and not on the commit root. Those lookups will fail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124)  * returning a -ESTALE error, and making send fail with that error. So make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125)  * sure a send does not see any orphans we have just removed, and that it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126)  * see the same inodes regardless of whether a transaction commit happened
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127)  * before it started (meaning that the commit root will be the same as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128)  * current root) or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130) static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133) 	struct btrfs_trans_handle *trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136) 	if (sctx->parent_root &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) 	    sctx->parent_root->node != sctx->parent_root->commit_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) 		goto commit_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) 	for (i = 0; i < sctx->clone_roots_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141) 		if (sctx->clone_roots[i].root->node !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142) 		    sctx->clone_roots[i].root->commit_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) 			goto commit_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145) 	if (trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) 		return btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) commit_trans:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) 	/* Use any root, all fs roots will get their commit roots updated. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152) 	if (!trans) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) 		trans = btrfs_join_transaction(sctx->send_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154) 		if (IS_ERR(trans))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155) 			return PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) 	return btrfs_commit_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163)  * Make sure any existing dellaloc is flushed for any root used by a send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164)  * operation so that we do not miss any data and we do not race with writeback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165)  * finishing and changing a tree while send is using the tree. This could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166)  * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167)  * a send operation then uses the subvolume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168)  * After flushing delalloc ensure_commit_roots_uptodate() must be called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170) static int flush_delalloc_roots(struct send_ctx *sctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172) 	struct btrfs_root *root = sctx->parent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) 	if (root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177) 		ret = btrfs_start_delalloc_snapshot(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180) 		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) 	for (i = 0; i < sctx->clone_roots_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184) 		root = sctx->clone_roots[i].root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185) 		ret = btrfs_start_delalloc_snapshot(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) 		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194) static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196) 	spin_lock(&root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197) 	root->send_in_progress--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) 	 * Not much left to do, we don't know why it's unbalanced and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200) 	 * can't blindly reset it to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202) 	if (root->send_in_progress < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) 		btrfs_err(root->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204) 			  "send_in_progress unbalanced %d root %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205) 			  root->send_in_progress, root->root_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) 	spin_unlock(&root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209) static void dedupe_in_progress_warn(const struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211) 	btrfs_warn_rl(root->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212) "cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) 		      root->root_key.objectid, root->dedupe_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) 	struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) 	struct btrfs_fs_info *fs_info = send_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221) 	struct btrfs_root *clone_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222) 	struct send_ctx *sctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) 	u64 *clone_sources_tmp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) 	int clone_sources_to_rollback = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226) 	size_t alloc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227) 	int sort_clone_roots = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) 	 * The subvolume must remain read-only during send, protect against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) 	 * making it RW. This also protects against deletion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) 	spin_lock(&send_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) 	if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238) 		dedupe_in_progress_warn(send_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239) 		spin_unlock(&send_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) 	send_root->send_in_progress++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243) 	spin_unlock(&send_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) 	 * Userspace tools do the checks and warn the user if it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247) 	 * not RO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249) 	if (!btrfs_root_readonly(send_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250) 		ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255) 	 * Check that we don't overflow at later allocations, we request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256) 	 * clone_sources_count + 1 items, and compare to unsigned long inside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257) 	 * access_ok.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259) 	if (arg->clone_sources_count >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260) 	    ULONG_MAX / sizeof(struct clone_root) - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265) 	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) 	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) 	if (!sctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276) 	INIT_LIST_HEAD(&sctx->new_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) 	INIT_LIST_HEAD(&sctx->deleted_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278) 	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279) 	INIT_LIST_HEAD(&sctx->name_cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281) 	sctx->flags = arg->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) 	sctx->send_filp = fget(arg->send_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) 	if (!sctx->send_filp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) 		ret = -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289) 	sctx->send_root = send_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) 	 * Unlikely but possible, if the subvolume is marked for deletion but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) 	 * is slow to remove the directory entry, send can still be started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) 	if (btrfs_root_dead(sctx->send_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) 		ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) 	sctx->clone_roots_cnt = arg->clone_sources_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301) 	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) 	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) 	if (!sctx->send_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) 	sctx->pending_dir_moves = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) 	sctx->waiting_dir_moves = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) 	sctx->orphan_dirs = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) 	sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) 				     arg->clone_sources_count + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) 				     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) 	if (!sctx->clone_roots) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) 	alloc_size = array_size(sizeof(*arg->clone_sources),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) 				arg->clone_sources_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) 	if (arg->clone_sources_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) 		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) 		if (!clone_sources_tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) 		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) 				alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) 			ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337) 		for (i = 0; i < arg->clone_sources_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) 			clone_root = btrfs_get_fs_root(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339) 						clone_sources_tmp[i], true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340) 			if (IS_ERR(clone_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) 				ret = PTR_ERR(clone_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) 			spin_lock(&clone_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345) 			if (!btrfs_root_readonly(clone_root) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) 			    btrfs_root_dead(clone_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) 				spin_unlock(&clone_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348) 				btrfs_put_root(clone_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) 				ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) 			if (clone_root->dedupe_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) 				dedupe_in_progress_warn(clone_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) 				spin_unlock(&clone_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) 				btrfs_put_root(clone_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) 				ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359) 			clone_root->send_in_progress++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360) 			spin_unlock(&clone_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) 			sctx->clone_roots[i].root = clone_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) 			clone_sources_to_rollback = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) 		kvfree(clone_sources_tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) 		clone_sources_tmp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369) 	if (arg->parent_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370) 		sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) 						      true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) 		if (IS_ERR(sctx->parent_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373) 			ret = PTR_ERR(sctx->parent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) 		spin_lock(&sctx->parent_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) 		sctx->parent_root->send_in_progress++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379) 		if (!btrfs_root_readonly(sctx->parent_root) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) 				btrfs_root_dead(sctx->parent_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) 			spin_unlock(&sctx->parent_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) 			ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) 		if (sctx->parent_root->dedupe_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386) 			dedupe_in_progress_warn(sctx->parent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387) 			spin_unlock(&sctx->parent_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) 			ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391) 		spin_unlock(&sctx->parent_root->root_item_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) 	 * Clones from send_root are allowed, but only if the clone source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) 	 * is behind the current send position. This is checked while searching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) 	 * for possible clone sources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) 	sctx->clone_roots[sctx->clone_roots_cnt++].root =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400) 		btrfs_grab_root(sctx->send_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) 	/* We do a bsearch later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) 	sort(sctx->clone_roots, sctx->clone_roots_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404) 			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405) 			NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406) 	sort_clone_roots = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) 	ret = flush_delalloc_roots(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) 	ret = ensure_commit_roots_uptodate(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) 	mutex_lock(&fs_info->balance_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417) 	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) 		mutex_unlock(&fs_info->balance_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) 		btrfs_warn_rl(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) 		"cannot run send because a balance operation is in progress");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421) 		ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) 	fs_info->send_in_progress++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) 	mutex_unlock(&fs_info->balance_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) 	current->journal_info = BTRFS_SEND_TRANS_STUB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) 	ret = send_subvol(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) 	current->journal_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) 	mutex_lock(&fs_info->balance_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431) 	fs_info->send_in_progress--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) 	mutex_unlock(&fs_info->balance_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436) 	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) 		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440) 		ret = send_cmd(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) 	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) 	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) 		struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) 		struct pending_dir_move *pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) 		n = rb_first(&sctx->pending_dir_moves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) 		pm = rb_entry(n, struct pending_dir_move, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) 		while (!list_empty(&pm->list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454) 			struct pending_dir_move *pm2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) 			pm2 = list_first_entry(&pm->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) 					       struct pending_dir_move, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) 			free_pending_move(sctx, pm2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) 		free_pending_move(sctx, pm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463) 	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) 	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) 		struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) 		struct waiting_dir_move *dm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) 		n = rb_first(&sctx->waiting_dir_moves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469) 		dm = rb_entry(n, struct waiting_dir_move, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) 		rb_erase(&dm->node, &sctx->waiting_dir_moves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) 		kfree(dm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) 	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475) 	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) 		struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) 		struct orphan_dir_info *odi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) 		n = rb_first(&sctx->orphan_dirs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) 		odi = rb_entry(n, struct orphan_dir_info, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481) 		free_orphan_dir_info(sctx, odi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484) 	if (sort_clone_roots) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) 		for (i = 0; i < sctx->clone_roots_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) 			btrfs_root_dec_send_in_progress(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487) 					sctx->clone_roots[i].root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) 			btrfs_put_root(sctx->clone_roots[i].root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491) 		for (i = 0; sctx && i < clone_sources_to_rollback; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) 			btrfs_root_dec_send_in_progress(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493) 					sctx->clone_roots[i].root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) 			btrfs_put_root(sctx->clone_roots[i].root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) 		btrfs_root_dec_send_in_progress(send_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) 	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) 		btrfs_root_dec_send_in_progress(sctx->parent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) 		btrfs_put_root(sctx->parent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) 	kvfree(clone_sources_tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) 	if (sctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) 		if (sctx->send_filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508) 			fput(sctx->send_filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) 		kvfree(sctx->clone_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) 		kvfree(sctx->send_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513) 		name_cache_free(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) 		kfree(sctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519) }