^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2009 Oracle. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/writeback.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/rbtree.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/error-injection.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "ctree.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "disk-io.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "transaction.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "volumes.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "locking.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "btrfs_inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "async-thread.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "free-space-cache.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "inode-map.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "qgroup.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "print-tree.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "delalloc-space.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "block-group.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "backref.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include "misc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * Relocation overview
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * [What does relocation do]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * The objective of relocation is to relocate all extents of the target block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * group to other block groups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * This is utilized by resize (shrink only), profile converting, compacting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * space, or balance routine to spread chunks over devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * Before | After
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * ------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * BG A: 10 data extents | BG A: deleted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * BG B: 2 data extents | BG B: 10 data extents (2 old + 8 relocated)
 * BG C: 1 extent         | BG C: 3 data extents (1 old + 2 relocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * [How does relocation work]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * 1. Mark the target block group read-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * New extents won't be allocated from the target block group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * 2.1 Record each extent in the target block group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * To build a proper map of extents to be relocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * 2.2 Build data reloc tree and reloc trees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * Data reloc tree will contain an inode, recording all newly relocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * data extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * There will be only one data reloc tree for one data block group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * Reloc tree will be a special snapshot of its source tree, containing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * relocated tree blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * Each tree referring to a tree block in target block group will get its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * reloc tree built.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * 2.3 Swap source tree with its corresponding reloc tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * Each involved tree only refers to new extents after swap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * 3. Cleanup reloc trees and data reloc tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * As old extents in the target block group are still referenced by reloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * trees, we need to clean them up before really freeing the target block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * The main complexity is in steps 2.2 and 2.3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * The entry point of relocation is relocate_block_group() function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define RELOCATION_RESERVED_NODES 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * map address of tree root to tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) */
struct mapping_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;	/* Bytenr of the tree root this entry keys on */
	}; /* Use rb_simple_node for search/insert */
	void *data;	/* The struct btrfs_root this root bytenr maps to */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
/* Rb-tree of mapping_node entries, keyed by tree root bytenr. */
struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;	/* Protects rb_root */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * present a tree block to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) */
struct tree_block {
	struct {
		struct rb_node rb_node;
		u64 bytenr;	/* Start bytenr of the tree block */
	}; /* Use rb_simple_node for search/insert */
	struct btrfs_key key;		/* First key of the block; only valid if key_ready */
	unsigned int level:8;		/* Level of the tree block */
	unsigned int key_ready:1;	/* Set once @key has been filled in */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define MAX_EXTENTS 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
/* A batch of up to MAX_EXTENTS contiguous data extents relocated together. */
struct file_extent_cluster {
	u64 start;			/* Logical start of the cluster */
	u64 end;			/* Logical end of the cluster */
	u64 boundary[MAX_EXTENTS];	/* Start offset of each extent in the cluster */
	unsigned int nr;		/* Number of boundary[] entries in use */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	/* metadata reservation used during relocation */
	struct btrfs_block_rsv *block_rsv;

	/* cache of backref nodes/edges built for the relocated blocks */
	struct btrfs_backref_cache backref_cache;

	/* current cluster of data extents being relocated */
	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	/* cursor for the extent search (see relocate_block_group) */
	u64 search_start;
	/* number of extents found so far */
	u64 extents_found;

	/* current stage: MOVE_DATA_EXTENTS or UPDATE_DATA_PTRS */
	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /* stages of data relocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #define MOVE_DATA_EXTENTS 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) #define UPDATE_DATA_PTRS 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) static void mark_block_processed(struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) struct btrfs_backref_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) u32 blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) if (node->level == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) in_range(node->bytenr, rc->block_group->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) rc->block_group->length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) blocksize = rc->extent_root->fs_info->nodesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) set_extent_bits(&rc->processed_blocks, node->bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) node->bytenr + blocksize - 1, EXTENT_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) node->processed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static void mapping_tree_init(struct mapping_tree *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) tree->rb_root = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) spin_lock_init(&tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
/*
 * Walk up the backref nodes until reaching a node that represents a
 * tree root.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) static struct btrfs_backref_node *walk_up_backref(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) struct btrfs_backref_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) struct btrfs_backref_edge *edges[], int *index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) struct btrfs_backref_edge *edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) int idx = *index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) while (!list_empty(&node->upper)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) edge = list_entry(node->upper.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) struct btrfs_backref_edge, list[LOWER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) edges[idx++] = edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) node = edge->node[UPPER];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) BUG_ON(node->detached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) *index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * walk down backref nodes to find start of next reference path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static struct btrfs_backref_node *walk_down_backref(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) struct btrfs_backref_edge *edges[], int *index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) struct btrfs_backref_edge *edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) struct btrfs_backref_node *lower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) int idx = *index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) while (idx > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) edge = edges[idx - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) lower = edge->node[LOWER];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) if (list_is_last(&edge->list[LOWER], &lower->upper)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) idx--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) edge = list_entry(edge->list[LOWER].next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) struct btrfs_backref_edge, list[LOWER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) edges[idx - 1] = edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) *index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) return edge->node[UPPER];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) *index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) static void update_backref_node(struct btrfs_backref_cache *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) struct btrfs_backref_node *node, u64 bytenr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) struct rb_node *rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) rb_erase(&node->rb_node, &cache->rb_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) node->bytenr = bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) if (rb_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * update backref cache after a transaction commit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static int update_backref_cache(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) struct btrfs_backref_cache *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) struct btrfs_backref_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) int level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) if (cache->last_trans == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) cache->last_trans = trans->transid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) if (cache->last_trans == trans->transid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * detached nodes are used to avoid unnecessary backref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * lookup. transaction commit changes the extent tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) * so the detached nodes are no longer useful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) while (!list_empty(&cache->detached)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) node = list_entry(cache->detached.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) struct btrfs_backref_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) btrfs_backref_cleanup_node(cache, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) while (!list_empty(&cache->changed)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) node = list_entry(cache->changed.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) struct btrfs_backref_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) list_del_init(&node->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) BUG_ON(node->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) update_backref_node(cache, node, node->new_bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) * some nodes can be left in the pending list if there were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) * errors during processing the pending nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) list_for_each_entry(node, &cache->pending[level], list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) BUG_ON(!node->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) if (node->bytenr == node->new_bytenr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) update_backref_node(cache, node, node->new_bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) cache->last_trans = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) static bool reloc_root_is_dead(struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * Pair with set_bit/clear_bit in clean_dirty_subvols and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * btrfs_update_reloc_root. We need to see the updated bit before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * trying to access reloc_root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * Check if this subvolume tree has valid reloc tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * Reloc tree after swap is considered dead, thus not considered as valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * This is enough for most callers, as they don't distinguish dead reloc root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) * from no reloc root. But btrfs_should_ignore_reloc_root() below is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * special case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) static bool have_reloc_root(struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) if (reloc_root_is_dead(root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) if (!root->reloc_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	/* Only shareable (subvolume) roots participate in relocation. */
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/* This root has been merged with its reloc tree, we can ignore it */
	if (reloc_root_is_dead(root))
		return 1;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	/* Reloc tree committed in the current transaction: do not ignore. */
	if (btrfs_header_generation(reloc_root->commit_root) ==
	    root->fs_info->running_transaction->transid)
		return 0;
	/*
	 * If there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) * find reloc tree by address of tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) struct reloc_control *rc = fs_info->reloc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) struct rb_node *rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) struct mapping_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) struct btrfs_root *root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) ASSERT(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) spin_lock(&rc->reloc_root_tree.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) if (rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) node = rb_entry(rb_node, struct mapping_node, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) root = (struct btrfs_root *)node->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) spin_unlock(&rc->reloc_root_tree.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) return btrfs_grab_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) * For useless nodes, do two major clean ups:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * - Cleanup the children edges and nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * If child node is also orphan (no parent) during cleanup, then the child
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) * node will also be cleaned up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * - Freeing up leaves (level 0), keeps nodes detached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * For nodes, the node is still cached as "detached"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * Return false if @node is not in the @useless_nodes list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * Return true if @node is in the @useless_nodes list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) static bool handle_useless_nodes(struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) struct btrfs_backref_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) struct btrfs_backref_cache *cache = &rc->backref_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) struct list_head *useless_node = &cache->useless_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) while (!list_empty(useless_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) struct btrfs_backref_node *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) cur = list_first_entry(useless_node, struct btrfs_backref_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) list_del_init(&cur->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) /* Only tree root nodes can be added to @useless_nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) ASSERT(list_empty(&cur->upper));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) if (cur == node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) /* The node is the lowest node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) if (cur->lowest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) list_del_init(&cur->lower);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) cur->lowest = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) /* Cleanup the lower edges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) while (!list_empty(&cur->lower)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) struct btrfs_backref_edge *edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) struct btrfs_backref_node *lower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) edge = list_entry(cur->lower.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) struct btrfs_backref_edge, list[UPPER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) list_del(&edge->list[UPPER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) list_del(&edge->list[LOWER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) lower = edge->node[LOWER];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) btrfs_backref_free_edge(cache, edge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) /* Child node is also orphan, queue for cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) if (list_empty(&lower->upper))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) list_add(&lower->list, useless_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) /* Mark this block processed for relocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) mark_block_processed(rc, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * Backref nodes for tree leaves are deleted from the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * Backref nodes for upper level tree blocks are left in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * cache to avoid unnecessary backref lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) if (cur->level > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) list_add(&cur->list, &cache->detached);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) cur->detached = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) rb_erase(&cur->rb_node, &cache->rb_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) btrfs_backref_free_node(cache, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
/*
 * Build backref tree for a given tree block. Root of the backref tree
 * corresponds the tree block, leaves of the backref tree correspond roots of
 * b-trees that reference the tree block.
 *
 * The basic idea of this function is check backrefs of a given block to find
 * upper level blocks that reference the block, and then check backrefs of
 * these upper level blocks recursively. The recursion stops when tree root is
 * reached or backrefs for the block is cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 * all upper level blocks that directly/indirectly reference the block are also
 * cached.
 *
 * Returns the backref node corresponding to @bytenr, NULL when
 * handle_useless_nodes() decided the node is not needed (ownership then
 * stays with the cache), or an ERR_PTR() on failure.
 */
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
			struct reloc_control *rc, struct btrfs_key *node_key,
			int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	/* For searching parent of TREE_BLOCK_REF */
	struct btrfs_path *path;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_edge *edge;
	int ret;
	int err = 0;

	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	/* The root of the backref tree is the block being relocated itself. */
	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->lowest = 1;
	cur = node;

	/* Breadth-first search to build backref cache */
	do {
		/*
		 * Resolve backrefs of @cur; newly discovered upper-level
		 * blocks are queued on cache->pending_edge for later
		 * iterations of this loop.
		 */
		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
						  cur);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/*
		 * The pending list isn't empty, take the first block to
		 * process
		 */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/* Returns true when the whole subtree got detached/freed as useless. */
	if (handle_useless_nodes(rc, node))
		node = NULL;
out:
	btrfs_backref_iter_free(iter);
	btrfs_free_path(path);
	if (err) {
		/* Tears down everything added to the cache above. */
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
	return node;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
/*
 * helper to add backref node for the newly created snapshot.
 * the backref node is created by cloning backref node that
 * corresponds to root of source tree
 *
 * Returns 0 on success (including the case where the source root has no
 * usable cached node, in which case nothing is cloned), -ENOMEM on
 * allocation failure.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_node *new_node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	/* First try the node cached for the source tree's commit root. */
	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			/* A live node must already track the reloc root. */
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	/* Fall back to the node cached for the reloc root's commit root. */
	if (!node) {
		rb_node = rb_simple_search(&cache->rb_root,
					   reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct btrfs_backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	/* Nothing cached for this root, nothing to clone. */
	if (!node)
		return 0;

	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
					    node->level);
	if (!new_node)
		return -ENOMEM;

	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = btrfs_grab_root(dest);
	ASSERT(new_node->root);

	if (!node->lowest) {
		/*
		 * Clone every lower edge of the source node, linking the
		 * clones only on the UPPER side for now; the LOWER side is
		 * hooked up after the rb-tree insert below succeeds.
		 */
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = btrfs_backref_alloc_edge(cache);
			if (!new_edge)
				goto fail;

			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
						new_node, LINK_UPPER);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
				   &new_node->rb_node);
	if (rb_node)
		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);

	if (!new_node->lowest) {
		/* Second phase: make the lower nodes see the new edges. */
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	/*
	 * Only the UPPER side was linked so far, so unhooking from
	 * new_node->lower is enough to free the partially cloned edges.
	 */
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct btrfs_backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		btrfs_backref_free_edge(cache, new_edge);
	}
	btrfs_backref_free_node(cache, new_node);
	return -ENOMEM;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * helper to add 'address of tree root -> reloc tree' mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) static int __must_check __add_reloc_root(struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) struct rb_node *rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) struct mapping_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) struct reloc_control *rc = fs_info->reloc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) node = kmalloc(sizeof(*node), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) node->bytenr = root->commit_root->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) node->data = root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) spin_lock(&rc->reloc_root_tree.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) node->bytenr, &node->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) spin_unlock(&rc->reloc_root_tree.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) btrfs_panic(fs_info, -EEXIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) "Duplicate root found for start=%llu while inserting into relocation tree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) node->bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) list_add_tail(&root->root_list, &rc->reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;
	bool put_ref = false;

	/*
	 * rc may already be gone (relocation finished) and root->node may be
	 * NULL for a partially set up reloc root; in both cases there is no
	 * rb-tree mapping to remove, only the list/ref handling below.
	 */
	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
					   root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
			RB_CLEAR_NODE(&node->rb_node);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		/* A mapping found under our key must belong to this root. */
		ASSERT(!node || (struct btrfs_root *)node->data == root);
	}

	/*
	 * We only put the reloc root here if it's on the list. There's a lot
	 * of places where the pattern is to splice the rc->reloc_roots, process
	 * the reloc roots, and then add the reloc root back onto
	 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
	 * list we don't want the reference being dropped, because the guy
	 * messing with the list is in charge of the reference.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&root->root_list)) {
		put_ref = true;
		list_del_init(&root->root_list);
	}
	spin_unlock(&fs_info->trans_lock);
	if (put_ref)
		btrfs_put_root(root);
	/* kfree(NULL) is a no-op when no mapping was found above. */
	kfree(node);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * helper to update the 'address of tree root -> reloc tree'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) static int __update_reloc_root(struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) struct rb_node *rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) struct mapping_node *node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) struct reloc_control *rc = fs_info->reloc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) spin_lock(&rc->reloc_root_tree.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) root->commit_root->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) node = rb_entry(rb_node, struct mapping_node, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) spin_unlock(&rc->reloc_root_tree.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) BUG_ON((struct btrfs_root *)node->data != root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) spin_lock(&rc->reloc_root_tree.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) node->bytenr = root->node->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) node->bytenr, &node->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) spin_unlock(&rc->reloc_root_tree.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (rb_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
/*
 * Create the on-disk root item and root for a relocation tree.
 *
 * @trans:    current transaction handle
 * @root:     the tree being relocated, or an existing reloc tree when
 *            called for a snapshot (see the two branches below)
 * @objectid: offset of the new reloc root's key
 *
 * Returns the newly read reloc root, or an ERR_PTR().  Once references
 * have been modified (must_abort), any failure aborts the transaction.
 */
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret = 0;
	bool must_abort = false;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	if (!root_item)
		return ERR_PTR(-ENOMEM);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;

		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) when the relocation root is created
		 * either inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;
	}

	/*
	 * We have changed references at this point, we must abort the
	 * transaction if anything fails.
	 */
	must_abort = true;

	/* Point the root item at the freshly copied root block @eb. */
	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		/* A reloc root of an fs tree holds no extra root refs. */
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
	}

	/* btrfs_copy_root() returned @eb locked and referenced. */
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	if (ret)
		goto fail;

	kfree(root_item);

	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
	if (IS_ERR(reloc_root)) {
		ret = PTR_ERR(reloc_root);
		goto abort;
	}
	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
	reloc_root->last_trans = trans->transid;
	return reloc_root;
fail:
	kfree(root_item);
abort:
	/* root_item already freed by this point on the abort path. */
	if (must_abort)
		btrfs_abort_transaction(trans, ret);
	return ERR_PTR(ret);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * create reloc tree for a given fs tree. reloc tree is just a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * snapshot of the fs tree with special root objectid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * The reloc_root comes out of here with two references, one for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * root->reloc_root, and another for being on the rc->reloc_roots list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct btrfs_root *reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct reloc_control *rc = fs_info->reloc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct btrfs_block_rsv *rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) int clear_rsv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * The subvolume has reloc tree but the swap is finished, no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * create/update the dead reloc tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (reloc_root_is_dead(root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * This is subtle but important. We do not do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * record_root_in_transaction for reloc roots, instead we record their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * corresponding fs root, and then here we update the last trans for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * reloc root. This means that we have to do this for the entire life
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * of the reloc root, regardless of which stage of the relocation we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (root->reloc_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) reloc_root = root->reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) reloc_root->last_trans = trans->transid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * We are merging reloc roots, we do not need new reloc trees. Also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * reloc trees never need their own reloc tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!rc->create_reloc_tree ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!trans->reloc_reserved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) rsv = trans->block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) trans->block_rsv = rc->block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) clear_rsv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (clear_rsv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) trans->block_rsv = rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ret = __add_reloc_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) BUG_ON(ret < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) root->reloc_root = btrfs_grab_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * update root item of reloc tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct btrfs_root *reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct btrfs_root_item *root_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!have_reloc_root(root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) reloc_root = root->reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) root_item = &reloc_root->root_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * We are probably ok here, but __del_reloc_root() will drop its ref of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * the root. We have the ref for root->reloc_root, but just in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * hold it while we update the reloc root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) btrfs_grab_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /* root->reloc_root will stay until current relocation finished */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (fs_info->reloc_ctl->merge_reloc_tree &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) btrfs_root_refs(root_item) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * Mark the tree as dead before we change reloc_root so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * have_reloc_root will not touch it from now on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) __del_reloc_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (reloc_root->commit_root != reloc_root->node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) __update_reloc_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) btrfs_set_root_node(root_item, reloc_root->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) free_extent_buffer(reloc_root->commit_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) reloc_root->commit_root = btrfs_root_node(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ret = btrfs_update_root(trans, fs_info->tree_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) &reloc_root->root_key, root_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) btrfs_put_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * helper to find first cached inode with inode number >= objectid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * in a subvolume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct rb_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct rb_node *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct btrfs_inode *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) spin_lock(&root->inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) node = root->inode_tree.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) prev = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) entry = rb_entry(node, struct btrfs_inode, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (objectid < btrfs_ino(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) node = node->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) else if (objectid > btrfs_ino(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) node = node->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (!node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) while (prev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) entry = rb_entry(prev, struct btrfs_inode, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (objectid <= btrfs_ino(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) node = prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) prev = rb_next(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) entry = rb_entry(node, struct btrfs_inode, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) inode = igrab(&entry->vfs_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) spin_unlock(&root->inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) objectid = btrfs_ino(entry) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (cond_resched_lock(&root->inode_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) node = rb_next(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) spin_unlock(&root->inode_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * get new location of data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) u64 bytenr, u64 num_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct btrfs_file_extent_item *fi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) bytenr -= BTRFS_I(reloc_inode)->index_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ret = btrfs_lookup_file_extent(NULL, root, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) fi = btrfs_item_ptr(leaf, path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) btrfs_file_extent_compression(leaf, fi) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) btrfs_file_extent_encryption(leaf, fi) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) btrfs_file_extent_other_encoding(leaf, fi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * update file extent items in the tree leaf to point to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * the new locations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) int replace_file_extents(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct extent_buffer *leaf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct btrfs_file_extent_item *fi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct inode *inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) u64 parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) u64 bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) u64 new_bytenr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) u64 num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) u64 end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) u32 nritems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) int first = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) int dirty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (rc->stage != UPDATE_DATA_PTRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /* reloc trees always use full backref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) parent = leaf->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) parent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) nritems = btrfs_header_nritems(leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) for (i = 0; i < nritems; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct btrfs_ref ref = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) btrfs_item_key_to_cpu(leaf, &key, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (key.type != BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (btrfs_file_extent_type(leaf, fi) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) BTRFS_FILE_EXTENT_INLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (bytenr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (!in_range(bytenr, rc->block_group->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) rc->block_group->length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * if we are modifying block in fs tree, wait for readpage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * to complete and drop the extent cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) inode = find_next_inode(root, key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) btrfs_add_delayed_iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) inode = find_next_inode(root, key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) end = key.offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) btrfs_file_extent_num_bytes(leaf, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) WARN_ON(!IS_ALIGNED(key.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) fs_info->sectorsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) end--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) key.offset, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) btrfs_drop_extent_cache(BTRFS_I(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) key.offset, end, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) unlock_extent(&BTRFS_I(inode)->io_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) key.offset, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) ret = get_new_location(rc->data_inode, &new_bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) bytenr, num_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * Don't have to abort since we've not changed anything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * in the file extent yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) dirty = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) key.offset -= btrfs_file_extent_offset(leaf, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) num_bytes, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) ref.real_root = root->root_key.objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) key.objectid, key.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ret = btrfs_inc_extent_ref(trans, &ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) btrfs_abort_transaction(trans, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) num_bytes, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ref.real_root = root->root_key.objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) key.objectid, key.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ret = btrfs_free_extent(trans, &ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) btrfs_abort_transaction(trans, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) btrfs_mark_buffer_dirty(leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) btrfs_add_delayed_iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) int memcmp_node_keys(struct extent_buffer *eb, int slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct btrfs_path *path, int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct btrfs_disk_key key1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct btrfs_disk_key key2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) btrfs_node_key(eb, &key1, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return memcmp(&key1, &key2, sizeof(key1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * try to replace tree blocks in fs tree with the new blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * in reloc tree. tree blocks haven't been modified since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * reloc tree was create can be replaced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * if a block was replaced, level of the block + 1 is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * if no block got replaced, 0 is returned. if there are other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * errors, a negative error number is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct btrfs_root *dest, struct btrfs_root *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) struct btrfs_path *path, struct btrfs_key *next_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) int lowest_level, int max_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) struct btrfs_fs_info *fs_info = dest->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct extent_buffer *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct btrfs_ref ref = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) u64 old_bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) u64 new_bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) u64 old_ptr_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) u64 new_ptr_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) u64 last_snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) u32 blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) int cow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) int level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) last_snapshot = btrfs_root_last_snapshot(&src->root_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) slot = path->slots[lowest_level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) eb = btrfs_lock_root_node(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) btrfs_set_lock_blocking_write(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) level = btrfs_header_level(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (level < lowest_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) btrfs_tree_unlock(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (cow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) BTRFS_NESTING_COW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) btrfs_set_lock_blocking_write(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (next_key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) next_key->objectid = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) next_key->type = (u8)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) next_key->offset = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) parent = eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct btrfs_key first_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) level = btrfs_header_level(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) ASSERT(level >= lowest_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) ret = btrfs_bin_search(parent, &key, &slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (ret && slot > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) slot--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (next_key && slot + 1 < btrfs_header_nritems(parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) btrfs_node_key_to_cpu(parent, next_key, slot + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) old_bytenr = btrfs_node_blockptr(parent, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) blocksize = fs_info->nodesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) btrfs_node_key_to_cpu(parent, &first_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (level <= max_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) eb = path->nodes[level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) new_bytenr = btrfs_node_blockptr(eb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) path->slots[level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) new_ptr_gen = btrfs_node_ptr_generation(eb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) path->slots[level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) new_bytenr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) new_ptr_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) ret = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) memcmp_node_keys(parent, slot, path, level)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (level <= lowest_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) level - 1, &first_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (IS_ERR(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) ret = PTR_ERR(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) } else if (!extent_buffer_uptodate(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) btrfs_tree_lock(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (cow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) ret = btrfs_cow_block(trans, dest, eb, parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) slot, &eb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) BTRFS_NESTING_COW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) btrfs_set_lock_blocking_write(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) btrfs_tree_unlock(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) free_extent_buffer(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) parent = eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (!cow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) btrfs_tree_unlock(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) free_extent_buffer(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) cow = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) btrfs_node_key_to_cpu(path->nodes[level], &key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) path->slots[level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) path->lowest_level = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) path->lowest_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * Info qgroup to trace both subtrees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * We must trace both trees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * 1) Tree reloc subtree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * If not traced, we will leak data numbers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * 2) Fs subtree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * If not traced, we will double count old data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * We don't scan the subtree right now, but only record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * the swapped tree blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * The real subtree rescan is delayed until we have new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * CoW on the subtree root node before transaction commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) rc->block_group, parent, slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) path->nodes[level], path->slots[level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) last_snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * swap blocks in fs tree and reloc tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) btrfs_set_node_blockptr(parent, slot, new_bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) btrfs_mark_buffer_dirty(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) btrfs_set_node_blockptr(path->nodes[level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) path->slots[level], old_bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) btrfs_set_node_ptr_generation(path->nodes[level],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) path->slots[level], old_ptr_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) btrfs_mark_buffer_dirty(path->nodes[level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) blocksize, path->nodes[level]->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ref.skip_qgroup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) ret = btrfs_inc_extent_ref(trans, &ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) blocksize, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) ref.skip_qgroup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) ret = btrfs_inc_extent_ref(trans, &ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) blocksize, path->nodes[level]->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) ref.skip_qgroup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) ret = btrfs_free_extent(trans, &ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) blocksize, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) ref.skip_qgroup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) ret = btrfs_free_extent(trans, &ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) btrfs_unlock_up_safe(path, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) ret = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) btrfs_tree_unlock(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) free_extent_buffer(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * helper to find next relocated block in reloc tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) int *level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) u64 last_snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) u32 nritems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) last_snapshot = btrfs_root_last_snapshot(&root->root_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) for (i = 0; i < *level; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) free_extent_buffer(path->nodes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) path->nodes[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) eb = path->nodes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) nritems = btrfs_header_nritems(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) while (path->slots[i] + 1 < nritems) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) path->slots[i]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) last_snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) *level = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) free_extent_buffer(path->nodes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) path->nodes[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * walk down reloc tree to find relocated block of lowest level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) int *level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct extent_buffer *eb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) u64 bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) u64 ptr_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) u64 last_snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) u32 nritems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) last_snapshot = btrfs_root_last_snapshot(&root->root_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) for (i = *level; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct btrfs_key first_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) eb = path->nodes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) nritems = btrfs_header_nritems(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) while (path->slots[i] < nritems) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (ptr_gen > last_snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) path->slots[i]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (path->slots[i] >= nritems) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (i == *level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) *level = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (i == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) *level = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) bytenr = btrfs_node_blockptr(eb, path->slots[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) &first_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (IS_ERR(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) return PTR_ERR(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) } else if (!extent_buffer_uptodate(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) BUG_ON(btrfs_header_level(eb) != i - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) path->nodes[i - 1] = eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) path->slots[i - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * invalidate extent cache for file extents whose key in range of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * [min_key, max_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) static int invalidate_extent_cache(struct btrfs_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct btrfs_key *min_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct btrfs_key *max_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) struct inode *inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) u64 objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) u64 start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) u64 ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) objectid = min_key->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (objectid > max_key->objectid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) inode = find_next_inode(root, objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) ino = btrfs_ino(BTRFS_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (ino > max_key->objectid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) objectid = ino + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (!S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (unlikely(min_key->objectid == ino)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (min_key->type > BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (min_key->type < BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) start = min_key->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (unlikely(max_key->objectid == ino)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (max_key->type < BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) end = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (max_key->offset == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) end = max_key->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) end--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) end = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /* the lock_extent waits for readpage to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) lock_extent(&BTRFS_I(inode)->io_tree, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) static int find_next_key(struct btrfs_path *path, int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) struct btrfs_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) while (level < BTRFS_MAX_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (!path->nodes[level])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (path->slots[level] + 1 <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) btrfs_header_nritems(path->nodes[level])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) btrfs_node_key_to_cpu(path->nodes[level], key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) path->slots[level] + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) level++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * Insert current subvolume into reloc_control::dirty_subvol_roots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct btrfs_root *reloc_root = root->reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) struct btrfs_root_item *reloc_root_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /* @root must be a subvolume tree root with a valid reloc tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) ASSERT(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) reloc_root_item = &reloc_root->root_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) memset(&reloc_root_item->drop_progress, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) sizeof(reloc_root_item->drop_progress));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) reloc_root_item->drop_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) btrfs_set_root_refs(reloc_root_item, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) btrfs_update_reloc_root(trans, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (list_empty(&root->reloc_dirty_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) btrfs_grab_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) static int clean_dirty_subvols(struct reloc_control *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct btrfs_root *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) int ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) reloc_dirty_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) /* Merged subvolume, cleanup its reloc root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) struct btrfs_root *reloc_root = root->reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) list_del_init(&root->reloc_dirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) root->reloc_root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * Need barrier to ensure clear_bit() only happens after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * root->reloc_root = NULL. Pairs with have_reloc_root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (reloc_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * btrfs_drop_snapshot drops our ref we hold for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * ->reloc_root. If it fails however we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) * drop the ref ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (ret2 < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) btrfs_put_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) ret = ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) btrfs_put_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) /* Orphan reloc tree, just clean it up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) ret2 = btrfs_drop_snapshot(root, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (ret2 < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) btrfs_put_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) ret = ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * merge the relocated tree blocks in reloc tree with corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * fs tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) struct btrfs_key next_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct btrfs_trans_handle *trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct btrfs_root *reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) struct btrfs_root_item *root_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) int reserve_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) int level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) int max_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) int replaced = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) u32 min_reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) path->reada = READA_FORWARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) reloc_root = root->reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) root_item = &reloc_root->root_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) level = btrfs_root_level(root_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) atomic_inc(&reloc_root->node->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) path->nodes[level] = reloc_root->node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) path->slots[level] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) level = root_item->drop_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) BUG_ON(level == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) path->lowest_level = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) path->lowest_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) btrfs_node_key_to_cpu(path->nodes[level], &next_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) path->slots[level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) WARN_ON(memcmp(&key, &next_key, sizeof(key)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) btrfs_unlock_up_safe(path, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) * In merge_reloc_root(), we modify the upper level pointer to swap the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) * tree blocks between reloc tree and subvolume tree. Thus for tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) * block COW, we COW at most from level 1 to root level for each tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) * Thus the needed metadata size is at most root_level * nodesize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * and * 2 since we have two trees to COW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) reserve_level = max_t(int, 1, btrfs_root_level(root_item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) min_reserved = fs_info->nodesize * reserve_level * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) memset(&next_key, 0, sizeof(next_key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) BTRFS_RESERVE_FLUSH_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) trans = btrfs_start_transaction(root, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) err = PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) * At this point we no longer have a reloc_control, so we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) * depend on btrfs_init_reloc_root to update our last_trans.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) * But that's ok, we started the trans handle on our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * corresponding fs_root, which means it's been added to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * dirty list. At commit time we'll still call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * btrfs_update_reloc_root() and update our root item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * appropriately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) reloc_root->last_trans = trans->transid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) trans->block_rsv = rc->block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) replaced = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) max_level = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) ret = walk_down_reloc_tree(reloc_root, path, &level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (!find_next_key(path, level, &key) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) ret = replace_path(trans, rc, root, reloc_root, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) &next_key, level, max_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) level = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) btrfs_node_key_to_cpu(path->nodes[level], &key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) path->slots[level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) replaced = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) ret = walk_up_reloc_tree(reloc_root, path, &level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) BUG_ON(level == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * save the merging progress in the drop_progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * this is OK since root refs == 1 in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) btrfs_node_key(path->nodes[level], &root_item->drop_progress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) path->slots[level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) root_item->drop_level = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) btrfs_end_transaction_throttle(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) btrfs_btree_balance_dirty(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) if (replaced && rc->stage == UPDATE_DATA_PTRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) invalidate_extent_cache(root, &key, &next_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * handle the case only one block in the fs tree need to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) * relocated and the block is tree root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) leaf = btrfs_lock_root_node(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) BTRFS_NESTING_COW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) btrfs_tree_unlock(leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) free_extent_buffer(leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) insert_dirty_subvol(trans, rc, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) btrfs_end_transaction_throttle(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) btrfs_btree_balance_dirty(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (replaced && rc->stage == UPDATE_DATA_PTRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) invalidate_extent_cache(root, &key, &next_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) int prepare_to_merge(struct reloc_control *rc, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) struct btrfs_root *root = rc->extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) struct btrfs_root *reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) LIST_HEAD(reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) u64 num_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) mutex_lock(&fs_info->reloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) rc->merging_rsv_size += rc->nodes_relocated * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) mutex_unlock(&fs_info->reloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) num_bytes = rc->merging_rsv_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) BTRFS_RESERVE_FLUSH_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) trans = btrfs_join_transaction(rc->extent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) btrfs_block_rsv_release(fs_info, rc->block_rsv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) num_bytes, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) return PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (num_bytes != rc->merging_rsv_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) btrfs_block_rsv_release(fs_info, rc->block_rsv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) num_bytes, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) rc->merge_reloc_tree = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) while (!list_empty(&rc->reloc_roots)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) reloc_root = list_entry(rc->reloc_roots.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) struct btrfs_root, root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) list_del_init(&reloc_root->root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) BUG_ON(IS_ERR(root));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) BUG_ON(root->reloc_root != reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * set reference count to 1, so btrfs_recover_relocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * knows it should resumes merging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) btrfs_set_root_refs(&reloc_root->root_item, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) btrfs_update_reloc_root(trans, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) list_add(&reloc_root->root_list, &reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) btrfs_put_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) list_splice(&reloc_roots, &rc->reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) btrfs_commit_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) void free_reloc_roots(struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct btrfs_root *reloc_root, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) list_for_each_entry_safe(reloc_root, tmp, list, root_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) __del_reloc_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) void merge_reloc_roots(struct reloc_control *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) struct btrfs_root *reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) LIST_HEAD(reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) root = rc->extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * this serializes us with btrfs_record_root_in_transaction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) * we have to make sure nobody is in the middle of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * adding their roots to the list while we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * doing this splice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) mutex_lock(&fs_info->reloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) list_splice_init(&rc->reloc_roots, &reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) mutex_unlock(&fs_info->reloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) while (!list_empty(&reloc_roots)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) reloc_root = list_entry(reloc_roots.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) struct btrfs_root, root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (btrfs_root_refs(&reloc_root->root_item) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) BUG_ON(IS_ERR(root));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) BUG_ON(root->reloc_root != reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) ret = merge_reloc_root(rc, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) btrfs_put_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (list_empty(&reloc_root->root_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) list_add_tail(&reloc_root->root_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) &reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (!IS_ERR(root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (root->reloc_root == reloc_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) root->reloc_root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) btrfs_put_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) &root->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) btrfs_put_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) list_del_init(&reloc_root->root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /* Don't forget to queue this reloc root for cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) list_add_tail(&reloc_root->reloc_dirty_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) &rc->dirty_subvol_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) btrfs_handle_fs_error(fs_info, ret, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) free_reloc_roots(&reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /* new reloc root may be added */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) mutex_lock(&fs_info->reloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) list_splice_init(&rc->reloc_roots, &reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) mutex_unlock(&fs_info->reloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) free_reloc_roots(&reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * We used to have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * here, but it's wrong. If we fail to start the transaction in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * prepare_to_merge() we will have only 0 ref reloc roots, none of which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * have actually been removed from the reloc_root_tree rb tree. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * fine because we're bailing here, and we hold a reference on the root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * for the list that holds it, so these roots will be cleaned up when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * will be cleaned up on unmount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) * The remaining nodes will be cleaned up by free_reloc_control.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static void free_block_list(struct rb_root *blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) struct tree_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct rb_node *rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) while ((rb_node = rb_first(blocks))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) block = rb_entry(rb_node, struct tree_block, rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) rb_erase(rb_node, blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) kfree(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) struct btrfs_root *reloc_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) struct btrfs_fs_info *fs_info = reloc_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (reloc_root->last_trans == trans->transid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) BUG_ON(IS_ERR(root));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) BUG_ON(root->reloc_root != reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) ret = btrfs_record_root_in_trans(trans, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) btrfs_put_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) struct btrfs_backref_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) struct btrfs_backref_edge *edges[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) struct btrfs_backref_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) next = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) next = walk_up_backref(next, edges, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) root = next->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) BUG_ON(!root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) BUG_ON(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) record_reloc_root_in_trans(trans, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) btrfs_record_root_in_trans(trans, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) root = root->reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (next->new_bytenr != root->node->start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) BUG_ON(next->new_bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) BUG_ON(!list_empty(&next->list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) next->new_bytenr = root->node->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) btrfs_put_root(next->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) next->root = btrfs_grab_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) ASSERT(next->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) list_add_tail(&next->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) &rc->backref_cache.changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) mark_block_processed(rc, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) next = walk_down_backref(edges, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (!next || next->level <= node->level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (!root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) next = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) /* setup backref node path for btrfs_reloc_cow_block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) rc->backref_cache.path[next->level] = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (--index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) next = edges[index]->node[UPPER];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) * Select a tree root for relocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) * Return NULL if the block is not shareable. We should use do_relocation() in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) * Return a tree root pointer if the block is shareable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) * Return -ENOENT if the block is root of reloc tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) struct btrfs_backref_node *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) struct btrfs_root *fs_root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) next = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) next = walk_up_backref(next, edges, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) root = next->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) BUG_ON(!root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) /* No other choice for non-shareable tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) return root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) fs_root = root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (next != node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) next = walk_down_backref(edges, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (!next || next->level <= node->level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (!fs_root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) return fs_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) u64 calcu_metadata_size(struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) struct btrfs_backref_node *node, int reserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) struct btrfs_backref_node *next = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) struct btrfs_backref_edge *edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) u64 num_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) BUG_ON(reserve && node->processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) while (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) if (next->processed && (reserve || next != node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) num_bytes += fs_info->nodesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (list_empty(&next->upper))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) edge = list_entry(next->upper.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) struct btrfs_backref_edge, list[LOWER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) edges[index++] = edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) next = edge->node[UPPER];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) next = walk_down_backref(edges, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) return num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) static int reserve_metadata_space(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) struct btrfs_backref_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) struct btrfs_root *root = rc->extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) u64 num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) u64 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) num_bytes = calcu_metadata_size(rc, node, 1) * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) trans->block_rsv = rc->block_rsv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) rc->reserved_bytes += num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * We are under a transaction here so we can only do limited flushing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * If we get an enospc just kick back -EAGAIN so we know to drop the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * transaction and try to refill when we can flush all the things.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) BTRFS_RESERVE_FLUSH_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) while (tmp <= rc->reserved_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) tmp <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) * only one thread can access block_rsv at this point,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) * so we don't need hold lock to protect block_rsv.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) * we expand more reservation size here to allow enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) * space for relocation and we will return earlier in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) * enospc case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) rc->block_rsv->size = tmp + fs_info->nodesize *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) RELOCATION_RESERVED_NODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) * relocate a block tree, and then update pointers in upper level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) * blocks that reference the block to point to the new location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) * if called by link_to_upper, the block has already been relocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * in that case this function just updates pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) static int do_relocation(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) struct btrfs_backref_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) struct btrfs_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) struct btrfs_path *path, int lowest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) struct btrfs_backref_node *upper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) struct btrfs_backref_edge *edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) u32 blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) u64 bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) u64 generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) BUG_ON(lowest && node->eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) path->lowest_level = node->level + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) rc->backref_cache.path[node->level] = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) list_for_each_entry(edge, &node->upper, list[LOWER]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) struct btrfs_key first_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) struct btrfs_ref ref = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) upper = edge->node[UPPER];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) root = select_reloc_root(trans, rc, upper, edges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) BUG_ON(!root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) if (upper->eb && !upper->locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (!lowest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) ret = btrfs_bin_search(upper->eb, key, &slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) bytenr = btrfs_node_blockptr(upper->eb, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (node->eb->start == bytenr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) btrfs_backref_drop_node_buffer(upper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (!upper->eb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) ret = btrfs_search_slot(trans, root, key, path, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (!upper->eb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) upper->eb = path->nodes[upper->level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) path->nodes[upper->level] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) BUG_ON(upper->eb != path->nodes[upper->level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) upper->locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) path->locks[upper->level] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) slot = path->slots[upper->level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) ret = btrfs_bin_search(upper->eb, key, &slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) bytenr = btrfs_node_blockptr(upper->eb, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) if (lowest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) if (bytenr != node->bytenr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) btrfs_err(root->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) bytenr, node->bytenr, slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) upper->eb->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (node->eb->start == bytenr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) blocksize = root->fs_info->nodesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) generation = btrfs_node_ptr_generation(upper->eb, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) eb = read_tree_block(fs_info, bytenr, generation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) upper->level - 1, &first_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) if (IS_ERR(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) err = PTR_ERR(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) } else if (!extent_buffer_uptodate(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) btrfs_tree_lock(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) btrfs_set_lock_blocking_write(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (!node->eb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) ret = btrfs_cow_block(trans, root, eb, upper->eb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) slot, &eb, BTRFS_NESTING_COW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) btrfs_tree_unlock(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) BUG_ON(node->eb != eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) btrfs_set_node_blockptr(upper->eb, slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) node->eb->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) btrfs_set_node_ptr_generation(upper->eb, slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) trans->transid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) btrfs_mark_buffer_dirty(upper->eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) node->eb->start, blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) upper->eb->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) ref.real_root = root->root_key.objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) btrfs_init_tree_ref(&ref, node->level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) btrfs_header_owner(upper->eb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) ret = btrfs_inc_extent_ref(trans, &ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) if (!upper->pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) btrfs_backref_drop_node_buffer(upper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) btrfs_backref_unlock_node_buffer(upper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) if (!err && node->pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) btrfs_backref_drop_node_buffer(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) list_move_tail(&node->list, &rc->backref_cache.changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) node->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) path->lowest_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) BUG_ON(err == -ENOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) static int link_to_upper(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) struct btrfs_backref_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) struct btrfs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) btrfs_node_key_to_cpu(node->eb, &key, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) return do_relocation(trans, rc, node, &key, path, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) static int finish_pending_nodes(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) struct btrfs_path *path, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) struct btrfs_backref_cache *cache = &rc->backref_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) struct btrfs_backref_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) int level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) while (!list_empty(&cache->pending[level])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) node = list_entry(cache->pending[level].next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) struct btrfs_backref_node, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) list_move_tail(&node->list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) BUG_ON(!node->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) ret = link_to_upper(trans, rc, node, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) list_splice_init(&list, &cache->pending[level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) * mark a block and all blocks directly/indirectly reference the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) * as processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) static void update_processed_blocks(struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) struct btrfs_backref_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) struct btrfs_backref_node *next = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) struct btrfs_backref_edge *edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) while (next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) if (next->processed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) mark_block_processed(rc, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (list_empty(&next->upper))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) edge = list_entry(next->upper.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) struct btrfs_backref_edge, list[LOWER]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) edges[index++] = edge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) next = edge->node[UPPER];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) next = walk_down_backref(edges, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) u32 blocksize = rc->extent_root->fs_info->nodesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) if (test_range_bit(&rc->processed_blocks, bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) static int get_tree_block_key(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) struct tree_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) block->level, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) if (IS_ERR(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) return PTR_ERR(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) } else if (!extent_buffer_uptodate(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (block->level == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) btrfs_item_key_to_cpu(eb, &block->key, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) btrfs_node_key_to_cpu(eb, &block->key, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) block->key_ready = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) * helper function to relocate a tree block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) static int relocate_tree_block(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) struct btrfs_backref_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) struct btrfs_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) struct btrfs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) * If we fail here we want to drop our backref_node because we are going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) * to start over and regenerate the tree for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) ret = reserve_metadata_space(trans, rc, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) BUG_ON(node->processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) root = select_one_root(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) if (root == ERR_PTR(-ENOENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) update_processed_blocks(rc, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) if (root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) BUG_ON(node->new_bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) BUG_ON(!list_empty(&node->list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) btrfs_record_root_in_trans(trans, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) root = root->reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) node->new_bytenr = root->node->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) btrfs_put_root(node->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) node->root = btrfs_grab_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) ASSERT(node->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) list_add_tail(&node->list, &rc->backref_cache.changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) path->lowest_level = node->level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) ret = btrfs_search_slot(trans, root, key, path, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) update_processed_blocks(rc, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) ret = do_relocation(trans, rc, node, key, path, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) if (ret || node->level == 0 || node->cowonly)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) btrfs_backref_cleanup_node(&rc->backref_cache, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) * relocate a list of blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) int relocate_tree_blocks(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) struct reloc_control *rc, struct rb_root *blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) struct btrfs_backref_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) struct tree_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) struct tree_block *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) goto out_free_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) /* Kick in readahead for tree blocks with missing keys */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) if (!block->key_ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) readahead_tree_block(fs_info, block->bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) /* Get first keys */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) if (!block->key_ready) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) err = get_tree_block_key(fs_info, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) goto out_free_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) /* Do tree relocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) node = build_backref_tree(rc, &block->key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) block->level, block->bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) if (IS_ERR(node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) err = PTR_ERR(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) ret = relocate_tree_block(trans, rc, node, &block->key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) err = finish_pending_nodes(trans, rc, path, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) out_free_path:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) out_free_blocks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) free_block_list(blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) static noinline_for_stack int prealloc_file_extent_cluster(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) struct btrfs_inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) struct file_extent_cluster *cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) u64 alloc_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) u64 start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) u64 end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) u64 offset = inode->index_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) u64 num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) int nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) u64 prealloc_start = cluster->start - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) u64 prealloc_end = cluster->end - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) u64 cur_offset = prealloc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) BUG_ON(cluster->start != cluster->boundary[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) ret = btrfs_alloc_data_chunk_ondemand(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) prealloc_end + 1 - prealloc_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) inode_lock(&inode->vfs_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) for (nr = 0; nr < cluster->nr; nr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) start = cluster->boundary[nr] - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) if (nr + 1 < cluster->nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) end = cluster->boundary[nr + 1] - 1 - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) end = cluster->end - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) lock_extent(&inode->io_tree, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) num_bytes = end + 1 - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) num_bytes, num_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) end + 1, &alloc_hint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) cur_offset = end + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) unlock_extent(&inode->io_tree, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) inode_unlock(&inode->vfs_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) if (cur_offset < prealloc_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) prealloc_end + 1 - cur_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) u64 block_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) struct extent_map *em;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) em = alloc_extent_map();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) if (!em)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) em->start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) em->len = end + 1 - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) em->block_len = em->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) em->block_start = block_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) set_bit(EXTENT_FLAG_PINNED, &em->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) lock_extent(&BTRFS_I(inode)->io_tree, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) write_lock(&em_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) ret = add_extent_mapping(em_tree, em, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) write_unlock(&em_tree->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) if (ret != -EEXIST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) free_extent_map(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * Allow error injection to test balance cancellation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) return atomic_read(&fs_info->balance_cancel_req) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) fatal_signal_pending(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) static int relocate_file_extent_cluster(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) struct file_extent_cluster *cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) u64 page_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) u64 page_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) u64 offset = BTRFS_I(inode)->index_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) unsigned long index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) unsigned long last_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) struct file_ra_state *ra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) int nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) if (!cluster->nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) ra = kzalloc(sizeof(*ra), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) if (!ra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) file_ra_state_init(ra, inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) ret = setup_extent_mapping(inode, cluster->start - offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) cluster->end - offset, cluster->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) index = (cluster->start - offset) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) last_index = (cluster->end - offset) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) while (index <= last_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) page = find_lock_page(inode->i_mapping, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) page_cache_sync_readahead(inode->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) ra, NULL, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) last_index + 1 - index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) page = find_or_create_page(inode->i_mapping, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) btrfs_delalloc_release_metadata(BTRFS_I(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) PAGE_SIZE, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) btrfs_delalloc_release_extents(BTRFS_I(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) if (PageReadahead(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) page_cache_async_readahead(inode->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) ra, NULL, page, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) last_index + 1 - index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) btrfs_readpage(NULL, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) btrfs_delalloc_release_metadata(BTRFS_I(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) PAGE_SIZE, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) btrfs_delalloc_release_extents(BTRFS_I(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) page_start = page_offset(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) page_end = page_start + PAGE_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) set_page_extent_mapped(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) if (nr < cluster->nr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) page_start + offset == cluster->boundary[nr]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) set_extent_bits(&BTRFS_I(inode)->io_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) page_start, page_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) EXTENT_BOUNDARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) nr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) ret = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) page_end, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) btrfs_delalloc_release_metadata(BTRFS_I(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) PAGE_SIZE, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) btrfs_delalloc_release_extents(BTRFS_I(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) clear_extent_bits(&BTRFS_I(inode)->io_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) page_start, page_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) EXTENT_LOCKED | EXTENT_BOUNDARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) unlock_extent(&BTRFS_I(inode)->io_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) page_start, page_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) balance_dirty_pages_ratelimited(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) btrfs_throttle(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) if (btrfs_should_cancel_balance(fs_info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) ret = -ECANCELED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) WARN_ON(nr != cluster->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) struct file_extent_cluster *cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) ret = relocate_file_extent_cluster(inode, cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) cluster->nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) if (!cluster->nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) cluster->start = extent_key->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) BUG_ON(cluster->nr >= MAX_EXTENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) cluster->end = extent_key->objectid + extent_key->offset - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) cluster->boundary[cluster->nr] = extent_key->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) cluster->nr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) if (cluster->nr >= MAX_EXTENTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) ret = relocate_file_extent_cluster(inode, cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) cluster->nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) * helper to add a tree block to the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) * the major work is getting the generation and level of the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) static int add_tree_block(struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) struct btrfs_key *extent_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) struct rb_root *blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) struct btrfs_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) struct btrfs_tree_block_info *bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) struct tree_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) struct rb_node *rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) u32 item_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) int level = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) u64 generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) item_size = btrfs_item_size_nr(eb, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) item_size >= sizeof(*ei) + sizeof(*bi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) ei = btrfs_item_ptr(eb, path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) struct btrfs_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) bi = (struct btrfs_tree_block_info *)(ei + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) level = btrfs_tree_block_level(eb, bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) level = (int)extent_key->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) generation = btrfs_extent_generation(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) btrfs_print_v0_err(eb->fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) BUG_ON(level == -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) block = kmalloc(sizeof(*block), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) if (!block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) block->bytenr = extent_key->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) block->key.objectid = rc->extent_root->fs_info->nodesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) block->key.offset = generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) block->level = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) block->key_ready = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) if (rb_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) -EEXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) static int __add_tree_block(struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) u64 bytenr, u32 blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) struct rb_root *blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) if (tree_block_processed(bytenr, rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) if (rb_simple_search(blocks, bytenr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) key.objectid = bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) if (skinny) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) key.type = BTRFS_METADATA_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) key.offset = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) key.type = BTRFS_EXTENT_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) key.offset = blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) path->search_commit_root = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) path->skip_locking = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) if (ret > 0 && skinny) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) if (path->slots[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) path->slots[0]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) btrfs_item_key_to_cpu(path->nodes[0], &key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) if (key.objectid == bytenr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) (key.type == BTRFS_METADATA_ITEM_KEY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) (key.type == BTRFS_EXTENT_ITEM_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) key.offset == blocksize)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) skinny = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) ASSERT(ret == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) btrfs_print_leaf(path->nodes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) "tree block extent item (%llu) is not found in extent tree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) ret = add_tree_block(rc, &key, path, blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) struct btrfs_block_group *block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) u64 ino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) struct btrfs_root *root = fs_info->tree_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) if (inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) goto truncate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) inode = btrfs_iget(fs_info->sb, ino, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) if (IS_ERR(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) truncate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) ret = btrfs_check_trunc_cache_free_space(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) &fs_info->global_block_rsv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) trans = btrfs_join_transaction(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) ret = PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) btrfs_btree_balance_dirty(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) * Locate the free space cache EXTENT_DATA in root tree leaf and delete the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) * cache inode, to avoid free space cache data extent blocking data relocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) static int delete_v1_space_cache(struct extent_buffer *leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) struct btrfs_block_group *block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) u64 data_bytenr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) u64 space_cache_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) struct btrfs_file_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) for (i = 0; i < btrfs_header_nritems(leaf); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) btrfs_item_key_to_cpu(leaf, &key, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) if (key.type != BTRFS_EXTENT_DATA_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) type = btrfs_file_extent_type(leaf, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) if ((type == BTRFS_FILE_EXTENT_REG ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) type == BTRFS_FILE_EXTENT_PREALLOC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) space_cache_ino = key.objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) space_cache_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) * helper to find all tree blocks that reference a given data extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) int add_data_references(struct reloc_control *rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) struct btrfs_key *extent_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) struct rb_root *blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) struct ulist *leaves = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) struct ulist_iterator leaf_uiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) struct ulist_node *ref_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) const u32 blocksize = fs_info->nodesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 0, &leaves, NULL, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) ULIST_ITER_INIT(&leaf_uiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) eb = read_tree_block(fs_info, ref_node->val, 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) if (IS_ERR(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) ret = PTR_ERR(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) ret = delete_v1_space_cache(eb, rc->block_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) extent_key->objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) free_block_list(blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) ulist_free(leaves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) * helper to find next unprocessed extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) struct btrfs_key *extent_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) u64 start, end, last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) last = rc->block_group->start + rc->block_group->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) if (rc->search_start >= last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) key.objectid = rc->search_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) key.type = BTRFS_EXTENT_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) key.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) path->search_commit_root = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) path->skip_locking = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) if (path->slots[0] >= btrfs_header_nritems(leaf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) ret = btrfs_next_leaf(rc->extent_root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) if (key.objectid >= last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) if (key.type != BTRFS_EXTENT_ITEM_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) key.type != BTRFS_METADATA_ITEM_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) if (key.type == BTRFS_EXTENT_ITEM_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) key.objectid + key.offset <= rc->search_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) if (key.type == BTRFS_METADATA_ITEM_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) key.objectid + fs_info->nodesize <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) rc->search_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) path->slots[0]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) ret = find_first_extent_bit(&rc->processed_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) key.objectid, &start, &end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) EXTENT_DIRTY, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) if (ret == 0 && start <= key.objectid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) rc->search_start = end + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) if (key.type == BTRFS_EXTENT_ITEM_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) rc->search_start = key.objectid + key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) rc->search_start = key.objectid +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) fs_info->nodesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) memcpy(extent_key, &key, sizeof(key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) static void set_reloc_control(struct reloc_control *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) mutex_lock(&fs_info->reloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) fs_info->reloc_ctl = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) mutex_unlock(&fs_info->reloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) static void unset_reloc_control(struct reloc_control *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) mutex_lock(&fs_info->reloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) fs_info->reloc_ctl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) mutex_unlock(&fs_info->reloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) static int check_extent_flags(u64 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) int prepare_to_relocate(struct reloc_control *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) BTRFS_BLOCK_RSV_TEMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) if (!rc->block_rsv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) memset(&rc->cluster, 0, sizeof(rc->cluster));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) rc->search_start = rc->block_group->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) rc->extents_found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) rc->nodes_relocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) rc->merging_rsv_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) rc->reserved_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) RELOCATION_RESERVED_NODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) ret = btrfs_block_rsv_refill(rc->extent_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) rc->block_rsv, rc->block_rsv->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) BTRFS_RESERVE_FLUSH_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) rc->create_reloc_tree = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) set_reloc_control(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) trans = btrfs_join_transaction(rc->extent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) unset_reloc_control(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) * extent tree is not a ref_cow tree and has no reloc_root to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) * cleanup. And callers are responsible to free the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) * block rsv.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) return PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) btrfs_commit_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) struct rb_root blocks = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) struct btrfs_trans_handle *trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) struct btrfs_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) u64 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) u32 item_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) int progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) path->reada = READA_FORWARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) ret = prepare_to_relocate(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) rc->reserved_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) ret = btrfs_block_rsv_refill(rc->extent_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) rc->block_rsv, rc->block_rsv->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) BTRFS_RESERVE_FLUSH_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) progress++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) trans = btrfs_start_transaction(rc->extent_root, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) err = PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) if (update_backref_cache(trans, &rc->backref_cache)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) ret = find_next_extent(rc, path, &key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) rc->extents_found++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) struct btrfs_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) if (item_size >= sizeof(*ei)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) flags = btrfs_extent_flags(path->nodes[0], ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) ret = check_extent_flags(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) BUG_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) btrfs_print_v0_err(trans->fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) btrfs_abort_transaction(trans, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) ret = add_tree_block(rc, &key, path, &blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) } else if (rc->stage == UPDATE_DATA_PTRS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) (flags & BTRFS_EXTENT_FLAG_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) ret = add_data_references(rc, &key, path, &blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) if (!RB_EMPTY_ROOT(&blocks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) ret = relocate_tree_blocks(trans, rc, &blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) if (ret != -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) rc->extents_found--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) rc->search_start = key.objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) btrfs_end_transaction_throttle(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) btrfs_btree_balance_dirty(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) if (rc->stage == MOVE_DATA_EXTENTS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) (flags & BTRFS_EXTENT_FLAG_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) rc->found_file_extent = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) ret = relocate_data_extent(rc->data_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) &key, &rc->cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) if (btrfs_should_cancel_balance(fs_info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) err = -ECANCELED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) if (trans && progress && err == -ENOSPC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) if (ret == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) if (trans) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) btrfs_end_transaction_throttle(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) btrfs_btree_balance_dirty(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) ret = relocate_file_extent_cluster(rc->data_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) &rc->cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) rc->create_reloc_tree = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) set_reloc_control(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) btrfs_backref_release_cache(&rc->backref_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) * Even in the case when the relocation is cancelled, we should all go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) * through prepare_to_merge() and merge_reloc_roots().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) * For error (including cancelled balance), prepare_to_merge() will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) * mark all reloc trees orphan, then queue them for cleanup in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) * merge_reloc_roots()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) err = prepare_to_merge(rc, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) merge_reloc_roots(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) rc->merge_reloc_tree = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) unset_reloc_control(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) /* get rid of pinned extents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) trans = btrfs_join_transaction(rc->extent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) err = PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) btrfs_commit_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) ret = clean_dirty_subvols(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) if (ret < 0 && !err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) btrfs_free_block_rsv(fs_info, rc->block_rsv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) struct btrfs_root *root, u64 objectid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) struct btrfs_inode_item *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) ret = btrfs_insert_empty_inode(trans, root, path, objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) btrfs_set_inode_generation(leaf, item, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) btrfs_set_inode_size(leaf, item, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) BTRFS_INODE_PREALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) btrfs_mark_buffer_dirty(leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) * helper to create inode for data relocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) * the inode is in data relocation tree and its link count is 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) static noinline_for_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) struct btrfs_block_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) struct inode *inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) struct btrfs_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) u64 objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) root = btrfs_grab_root(fs_info->data_reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) trans = btrfs_start_transaction(root, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) btrfs_put_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) return ERR_CAST(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) err = btrfs_find_free_objectid(root, &objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) err = __insert_orphan_inode(trans, root, objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) BUG_ON(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) inode = btrfs_iget(fs_info->sb, objectid, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) BUG_ON(IS_ERR(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) BTRFS_I(inode)->index_cnt = group->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) err = btrfs_orphan_add(trans, BTRFS_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) btrfs_put_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) btrfs_btree_balance_dirty(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) if (inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) inode = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) struct reloc_control *rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) rc = kzalloc(sizeof(*rc), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) INIT_LIST_HEAD(&rc->reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) INIT_LIST_HEAD(&rc->dirty_subvol_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) mapping_tree_init(&rc->reloc_root_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) extent_io_tree_init(fs_info, &rc->processed_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) IO_TREE_RELOC_BLOCKS, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) static void free_reloc_control(struct reloc_control *rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) struct mapping_node *node, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) free_reloc_roots(&rc->reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) rbtree_postorder_for_each_entry_safe(node, tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) &rc->reloc_root_tree.rb_root, rb_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) kfree(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) kfree(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) * Print the block group being relocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) static void describe_relocation(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) struct btrfs_block_group *block_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) char buf[128] = {'\0'};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) btrfs_info(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) "relocating block group %llu flags %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) block_group->start, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) static const char *stage_to_string(int stage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) if (stage == MOVE_DATA_EXTENTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) return "move data extents";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) if (stage == UPDATE_DATA_PTRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) return "update data pointers";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) * function to relocate all extents in a block group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) struct btrfs_block_group *bg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) struct btrfs_root *extent_root = fs_info->extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) struct reloc_control *rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) int rw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) bg = btrfs_lookup_block_group(fs_info, group_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) if (!bg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) if (btrfs_pinned_by_swapfile(fs_info, bg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) btrfs_put_block_group(bg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) return -ETXTBSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) rc = alloc_reloc_control(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) btrfs_put_block_group(bg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) rc->extent_root = extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) rc->block_group = bg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) ret = btrfs_inc_block_group_ro(rc->block_group, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) rw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) inode = lookup_free_space_inode(rc->block_group, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) if (!IS_ERR(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) ret = PTR_ERR(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) if (ret && ret != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) if (IS_ERR(rc->data_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) err = PTR_ERR(rc->data_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) rc->data_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) describe_relocation(fs_info, rc->block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) btrfs_wait_block_group_reservations(rc->block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) btrfs_wait_nocow_writers(rc->block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) btrfs_wait_ordered_roots(fs_info, U64_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) rc->block_group->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) rc->block_group->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) int finishes_stage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) mutex_lock(&fs_info->cleaner_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) ret = relocate_block_group(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) mutex_unlock(&fs_info->cleaner_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) finishes_stage = rc->stage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) * We may have gotten ENOSPC after we already dirtied some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) * extents. If writeout happens while we're relocating a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) * different block group we could end up hitting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) * btrfs_reloc_cow_block. Make sure we write everything out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) * properly so we don't trip over this problem, and then break
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) * out of the loop if we hit an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) ret = btrfs_wait_ordered_range(rc->data_inode, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) (u64)-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) invalidate_mapping_pages(rc->data_inode->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 0, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) rc->stage = UPDATE_DATA_PTRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) if (rc->extents_found == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) btrfs_info(fs_info, "found %llu extents, stage: %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) rc->extents_found, stage_to_string(finishes_stage));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) WARN_ON(rc->block_group->pinned > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) WARN_ON(rc->block_group->reserved > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) WARN_ON(rc->block_group->used > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) if (err && rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) btrfs_dec_block_group_ro(rc->block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) iput(rc->data_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) btrfs_put_block_group(rc->block_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) free_reloc_control(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) int ret, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) trans = btrfs_start_transaction(fs_info->tree_root, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) if (IS_ERR(trans))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) return PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) memset(&root->root_item.drop_progress, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) sizeof(root->root_item.drop_progress));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) root->root_item.drop_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) btrfs_set_root_refs(&root->root_item, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) ret = btrfs_update_root(trans, fs_info->tree_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) &root->root_key, &root->root_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) err = btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) * recover relocation interrupted by system crash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) * this function resumes merging reloc trees with corresponding fs trees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) * this is important for keeping the sharing of tree blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) int btrfs_recover_relocation(struct btrfs_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) LIST_HEAD(reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) struct btrfs_root *fs_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) struct btrfs_root *reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) struct reloc_control *rc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) path->reada = READA_BACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) key.objectid = BTRFS_TREE_RELOC_OBJECTID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) key.type = BTRFS_ROOT_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) key.offset = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) if (path->slots[0] == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) path->slots[0]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) key.type != BTRFS_ROOT_ITEM_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) reloc_root = btrfs_read_tree_root(root, &key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) if (IS_ERR(reloc_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) err = PTR_ERR(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) list_add(&reloc_root->root_list, &reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) if (btrfs_root_refs(&reloc_root->root_item) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) fs_root = btrfs_get_fs_root(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) reloc_root->root_key.offset, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) if (IS_ERR(fs_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) ret = PTR_ERR(fs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) if (ret != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) ret = mark_garbage_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) btrfs_put_root(fs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) if (key.offset == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) key.offset--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) if (list_empty(&reloc_roots))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) rc = alloc_reloc_control(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) rc->extent_root = fs_info->extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) set_reloc_control(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) trans = btrfs_join_transaction(rc->extent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) err = PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) goto out_unset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) rc->merge_reloc_tree = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) while (!list_empty(&reloc_roots)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) reloc_root = list_entry(reloc_roots.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) struct btrfs_root, root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) list_del(&reloc_root->root_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) if (btrfs_root_refs(&reloc_root->root_item) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) list_add_tail(&reloc_root->root_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) &rc->reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) if (IS_ERR(fs_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) err = PTR_ERR(fs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) list_add_tail(&reloc_root->root_list, &reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) goto out_unset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) err = __add_reloc_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) BUG_ON(err < 0); /* -ENOMEM or logic error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) fs_root->reloc_root = btrfs_grab_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) btrfs_put_root(fs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) err = btrfs_commit_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) goto out_unset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) merge_reloc_roots(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) unset_reloc_control(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) trans = btrfs_join_transaction(rc->extent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) err = PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) goto out_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) err = btrfs_commit_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) out_clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) ret = clean_dirty_subvols(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) if (ret < 0 && !err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) err = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) out_unset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) unset_reloc_control(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) free_reloc_control(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) free_reloc_roots(&reloc_roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) /* cleanup orphan inode in data relocation tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) fs_root = btrfs_grab_root(fs_info->data_reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) ASSERT(fs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) err = btrfs_orphan_cleanup(fs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) btrfs_put_root(fs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) * helper to add ordered checksum for data relocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) * cloning checksum properly handles the nodatasum extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) * it also saves CPU time to re-calculate the checksum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) struct btrfs_fs_info *fs_info = inode->root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) struct btrfs_ordered_sum *sums;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) struct btrfs_ordered_extent *ordered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) u64 disk_bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) u64 new_bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) ordered = btrfs_lookup_ordered_extent(inode, file_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) disk_bytenr = file_pos + inode->index_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) disk_bytenr + len - 1, &list, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) while (!list_empty(&list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) sums = list_entry(list.next, struct btrfs_ordered_sum, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) list_del_init(&sums->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) * We need to offset the new_bytenr based on where the csum is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) * We need to do this because we will read in entire prealloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) * extents but we may have written to say the middle of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) * prealloc extent, so we need to make sure the csum goes with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) * the right disk offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) * We can do this because the data reloc inode refers strictly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) * to the on disk bytes, so we don't have to worry about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) * disk_len vs real len like with real inodes since it's all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) * disk length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) sums->bytenr = new_bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) btrfs_add_ordered_sum(ordered, sums);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) btrfs_put_ordered_extent(ordered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) struct btrfs_root *root, struct extent_buffer *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) struct extent_buffer *cow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) struct reloc_control *rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) struct btrfs_backref_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) int first_cow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) int level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) rc = fs_info->reloc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) level = btrfs_header_level(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) if (btrfs_header_generation(buf) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) btrfs_root_last_snapshot(&root->root_item))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) first_cow = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) rc->create_reloc_tree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) WARN_ON(!first_cow && level == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) node = rc->backref_cache.path[level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) BUG_ON(node->bytenr != buf->start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) node->new_bytenr != buf->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) btrfs_backref_drop_node_buffer(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) atomic_inc(&cow->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) node->eb = cow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) node->new_bytenr = cow->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) if (!node->pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) list_move_tail(&node->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) &rc->backref_cache.pending[level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) node->pending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) if (first_cow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) mark_block_processed(rc, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) if (first_cow && level > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) rc->nodes_relocated += buf->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) ret = replace_file_extents(trans, rc, root, cow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) * called before creating snapshot. it calculates metadata reservation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) * required for relocating tree blocks in the snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) u64 *bytes_to_reserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) struct btrfs_root *root = pending->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) struct reloc_control *rc = root->fs_info->reloc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) if (!rc || !have_reloc_root(root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) if (!rc->merge_reloc_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) root = root->reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) BUG_ON(btrfs_root_refs(&root->root_item) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) * relocation is in the stage of merging trees. the space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) * used by merging a reloc tree is twice the size of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) * relocated tree nodes in the worst case. half for cowing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) * the reloc tree, half for cowing the fs tree. the space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) * used by cowing the reloc tree will be freed after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) * tree is dropped. if we create snapshot, cowing the fs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) * tree may use more space than it frees. so we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) * reserve extra space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) *bytes_to_reserve += rc->nodes_relocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) * called after snapshot is created. migrate block reservation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) * and create reloc root for the newly created snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) * This is similar to btrfs_init_reloc_root(), we come out of here with two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) * references held on the reloc_root, one for root->reloc_root and one for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) * rc->reloc_roots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) struct btrfs_pending_snapshot *pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) struct btrfs_root *root = pending->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) struct btrfs_root *reloc_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) struct btrfs_root *new_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) struct reloc_control *rc = root->fs_info->reloc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) if (!rc || !have_reloc_root(root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) rc = root->fs_info->reloc_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) rc->merging_rsv_size += rc->nodes_relocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) if (rc->merge_reloc_tree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) ret = btrfs_block_rsv_migrate(&pending->block_rsv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) rc->block_rsv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) rc->nodes_relocated, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) new_root = pending->snap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) reloc_root = create_reloc_root(trans, root->reloc_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) new_root->root_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) if (IS_ERR(reloc_root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) return PTR_ERR(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) ret = __add_reloc_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) BUG_ON(ret < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) new_root->reloc_root = btrfs_grab_root(reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) if (rc->create_reloc_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) ret = clone_backref_node(trans, rc, root, reloc_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) }