// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

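/*
 * Check if the file extent item @fi in @eb covers @extent_item_pos within
 * the referenced extent and, if so, prepend a new extent_inode_elem with
 * the inode number and logical file offset to the @eie list.
 *
 * Returns 0 on success, 1 if @extent_item_pos lies outside this file
 * extent, and -ENOMEM on allocation failure.
 */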
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

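/* Free a whole chain of extent_inode_elem entries. */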
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

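/*
 * Scan every file extent item in @eb and, for each one that references the
 * extent at @wanted_disk_byte, record the inode and file offset in @eie via
 * check_extent_in_eb().
 */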
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need
	 * the key. Thus, we must look at all items to find one (or
	 * several) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

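/* An rbtree of prelim_refs, with a cached count of its entries. */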
struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

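/*
 * Adjust sc->share_count when a ref's count crosses zero: a transition
 * from <1 to >0 adds a sharer, a transition from >0 to <1 removes one.
 */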
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

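/*
 * Check whether the direct preftree already contains a shared data backref
 * whose parent block is @bytenr. Returns 1 if it does, 0 otherwise.
 */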
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

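/*
 * Starting from the slot that @path points at, walk the file extent items
 * that reference @ref->wanted_disk_byte and add every containing leaf to
 * @parents (for metadata refs, just the node at @level), optionally
 * collecting inode/offset pairs when @extent_item_pos is given.
 */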
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

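	/*
	 * For a metadata ref the node at @level is itself the parent;
	 * just record it and we are done.
	 */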
	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

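		/*
		 * Only count items whose file offset, minus the extent data
		 * offset, matches the offset stored in the backref; other
		 * items share the disk bytenr but belong to different
		 * backrefs.
		 */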
		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're doing a search_commit_root search we could be holding
	 * locks on other tree nodes. This happens when qgroup accounting
	 * does backref walks while adding new delayed refs. To deal with
	 * this we need to look in cache for the root, and if we don't
	 * find it then we need to search the tree_root's commit root,
	 * thus the btrfs_get_fs_root_commit_root usage here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

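	/* The wanted parent would sit above the root; nothing to resolve. */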
	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such a case we set the search key's offset to zero
	 * to make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

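/* Convert a ulist node's aux field back into its extent_inode_elem list. */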
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * We can only tolerate ENOENT; any other error should be
		 * caught and returned directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * Add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to the given seq to the preftrees.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct btrfs_delayed_ref_head *head, u64 seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct preftrees *preftrees, struct share_check *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct btrfs_delayed_ref_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct btrfs_delayed_extent_op *extent_op = head->extent_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct btrfs_key tmp_op_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
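	/*
	 * If this head has a pending extent op that updates the key, grab
	 * the new key up front so the tree block refs below can use it.
	 */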
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (extent_op && extent_op->update_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) spin_lock(&head->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) node = rb_entry(n, struct btrfs_delayed_ref_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ref_node);
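		/* Skip refs newer than the point in time we are interested in. */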
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (node->seq > seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) switch (node->action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) case BTRFS_ADD_DELAYED_EXTENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) case BTRFS_UPDATE_DELAYED_HEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) case BTRFS_ADD_DELAYED_REF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) count = node->ref_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) case BTRFS_DROP_DELAYED_REF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) count = node->ref_mod * -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) switch (node->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) case BTRFS_TREE_BLOCK_REF_KEY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) /* NORMAL INDIRECT METADATA backref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) struct btrfs_delayed_tree_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) ref = btrfs_delayed_node_to_tree_ref(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ret = add_indirect_ref(fs_info, preftrees, ref->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) &tmp_op_key, ref->level + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) node->bytenr, count, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) case BTRFS_SHARED_BLOCK_REF_KEY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* SHARED DIRECT METADATA backref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct btrfs_delayed_tree_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ref = btrfs_delayed_node_to_tree_ref(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ref->parent, node->bytenr, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) sc, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) case BTRFS_EXTENT_DATA_REF_KEY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /* NORMAL INDIRECT DATA backref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct btrfs_delayed_data_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) ref = btrfs_delayed_node_to_data_ref(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) key.objectid = ref->objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) key.type = BTRFS_EXTENT_DATA_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) key.offset = ref->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
			/*
			 * If we find an inum that doesn't match the inum we
			 * are checking for, we know the extent is shared.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (sc && sc->inum && ref->objectid != sc->inum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ret = BACKREF_FOUND_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ret = add_indirect_ref(fs_info, preftrees, ref->root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) &key, 0, node->bytenr, count, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) case BTRFS_SHARED_DATA_REF_KEY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* SHARED DIRECT FULL backref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct btrfs_delayed_data_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ref = btrfs_delayed_node_to_data_ref(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) node->bytenr, count, sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * We must ignore BACKREF_FOUND_SHARED until all delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * refs have been checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (ret && (ret != BACKREF_FOUND_SHARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
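	/*
	 * All queued refs have been processed; now let the share check look
	 * at the accumulated count.
	 */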
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) ret = extent_is_shared(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) spin_unlock(&head->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * add all inline backrefs for bytenr to the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static int add_inline_refs(const struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct btrfs_path *path, u64 bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) int *info_level, struct preftrees *preftrees,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct share_check *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) unsigned long ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) unsigned long end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct btrfs_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u64 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) u64 item_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
	/* Enumerate all inline refs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) item_size = btrfs_item_size_nr(leaf, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) BUG_ON(item_size < sizeof(*ei));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) flags = btrfs_extent_flags(leaf, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) btrfs_item_key_to_cpu(leaf, &found_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
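	/* The inline ref area follows the extent item and runs to the end of the item. */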
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ptr = (unsigned long)(ei + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) end = (unsigned long)ei + item_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct btrfs_tree_block_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) info = (struct btrfs_tree_block_info *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *info_level = btrfs_tree_block_level(leaf, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ptr += sizeof(struct btrfs_tree_block_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) BUG_ON(ptr > end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) *info_level = found_key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) while (ptr < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct btrfs_extent_inline_ref *iref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) u64 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) iref = (struct btrfs_extent_inline_ref *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) type = btrfs_get_extent_inline_ref_type(leaf, iref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) BTRFS_REF_TYPE_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (type == BTRFS_REF_TYPE_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) offset = btrfs_extent_inline_ref_offset(leaf, iref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) case BTRFS_SHARED_BLOCK_REF_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) ret = add_direct_ref(fs_info, preftrees,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) *info_level + 1, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) bytenr, 1, NULL, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) case BTRFS_SHARED_DATA_REF_KEY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) struct btrfs_shared_data_ref *sdref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) sdref = (struct btrfs_shared_data_ref *)(iref + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) count = btrfs_shared_data_ref_count(leaf, sdref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ret = add_direct_ref(fs_info, preftrees, 0, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) bytenr, count, sc, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) case BTRFS_TREE_BLOCK_REF_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ret = add_indirect_ref(fs_info, preftrees, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) NULL, *info_level + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) bytenr, 1, NULL, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) case BTRFS_EXTENT_DATA_REF_KEY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct btrfs_extent_data_ref *dref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) u64 root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) dref = (struct btrfs_extent_data_ref *)(&iref->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) count = btrfs_extent_data_ref_count(leaf, dref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) key.objectid = btrfs_extent_data_ref_objectid(leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) dref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) key.type = BTRFS_EXTENT_DATA_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) key.offset = btrfs_extent_data_ref_offset(leaf, dref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (sc && sc->inum && key.objectid != sc->inum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ret = BACKREF_FOUND_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) root = btrfs_extent_data_ref_root(leaf, dref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ret = add_indirect_ref(fs_info, preftrees, root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) &key, 0, bytenr, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) sc, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ptr += btrfs_extent_inline_ref_size(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * add all non-inline backrefs for bytenr to the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static int add_keyed_refs(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct btrfs_path *path, u64 bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) int info_level, struct preftrees *preftrees,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct share_check *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct btrfs_root *extent_root = fs_info->extent_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ret = btrfs_next_item(extent_root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) btrfs_item_key_to_cpu(leaf, &key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (key.objectid != bytenr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) break;
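		/*
		 * Keyed backref items for this extent sort between
		 * BTRFS_TREE_BLOCK_REF_KEY and BTRFS_SHARED_DATA_REF_KEY;
		 * skip anything below that range and stop once past it.
		 */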
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (key.type > BTRFS_SHARED_DATA_REF_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) switch (key.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) case BTRFS_SHARED_BLOCK_REF_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /* SHARED DIRECT METADATA backref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ret = add_direct_ref(fs_info, preftrees,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) info_level + 1, key.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) bytenr, 1, NULL, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) case BTRFS_SHARED_DATA_REF_KEY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /* SHARED DIRECT FULL backref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct btrfs_shared_data_ref *sdref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) sdref = btrfs_item_ptr(leaf, slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct btrfs_shared_data_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) count = btrfs_shared_data_ref_count(leaf, sdref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ret = add_direct_ref(fs_info, preftrees, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) key.offset, bytenr, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) sc, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) case BTRFS_TREE_BLOCK_REF_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* NORMAL INDIRECT METADATA backref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ret = add_indirect_ref(fs_info, preftrees, key.offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) NULL, info_level + 1, bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 1, NULL, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) case BTRFS_EXTENT_DATA_REF_KEY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* NORMAL INDIRECT DATA backref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct btrfs_extent_data_ref *dref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) u64 root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) dref = btrfs_item_ptr(leaf, slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct btrfs_extent_data_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) count = btrfs_extent_data_ref_count(leaf, dref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) key.objectid = btrfs_extent_data_ref_objectid(leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) dref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) key.type = BTRFS_EXTENT_DATA_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) key.offset = btrfs_extent_data_ref_offset(leaf, dref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (sc && sc->inum && key.objectid != sc->inum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) ret = BACKREF_FOUND_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) root = btrfs_extent_data_ref_root(leaf, dref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) ret = add_indirect_ref(fs_info, preftrees, root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) &key, 0, bytenr, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) sc, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
		if (ret)
			return ret;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
/*
 * this adds all existing backrefs (inline backrefs, keyed backrefs and
 * delayed refs) for the given bytenr to the refs list, merges duplicates
 * and resolves indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs and
 * behaves much like the trans == NULL case; the only difference is that
 * it will not search the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned. If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME: some caching might speed things up.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static int find_parent_nodes(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct btrfs_fs_info *fs_info, u64 bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) u64 time_seq, struct ulist *refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct ulist *roots, const u64 *extent_item_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct share_check *sc, bool ignore_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct btrfs_delayed_ref_root *delayed_refs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct btrfs_delayed_ref_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) int info_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct prelim_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct rb_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct extent_inode_elem *eie = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) struct preftrees preftrees = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) .direct = PREFTREE_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) .indirect = PREFTREE_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) .indirect_missing_keys = PREFTREE_INIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) key.objectid = bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) key.offset = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) key.type = BTRFS_METADATA_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) key.type = BTRFS_EXTENT_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (!trans) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) path->search_commit_root = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) path->skip_locking = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
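	/*
	 * SEQ_LAST is the qgroup special case described above; delayed refs
	 * are not searched, so path locking can be skipped as well.
	 */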
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (time_seq == SEQ_LAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) path->skip_locking = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
/*
 * Grab both a lock on the path and a lock on the delayed ref head.
 * We need both to get a consistent picture of how the refs look
 * at a specified point in time.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /* This shouldn't happen, indicates a bug or fs corruption. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) ASSERT(ret != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ret = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (trans && likely(trans->type != __TRANS_DUMMY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) time_seq != SEQ_LAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (trans && time_seq != SEQ_LAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) #endif
		/*
		 * Check if there are any queued updates for this ref and
		 * lock the head.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) delayed_refs = &trans->transaction->delayed_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) spin_lock(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (!mutex_trylock(&head->mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) refcount_inc(&head->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) spin_unlock(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * Mutex was contended, block until it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * released and try again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) mutex_lock(&head->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) mutex_unlock(&head->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) btrfs_put_delayed_ref_head(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) spin_unlock(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) ret = add_delayed_refs(fs_info, head, time_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) &preftrees, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) mutex_unlock(&head->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) spin_unlock(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
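	/*
	 * The search key's offset was (u64)-1, so btrfs_search_slot leaves us
	 * one slot past the extent item (if any); step back to inspect it.
	 */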
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (path->slots[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) path->slots[0]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) btrfs_item_key_to_cpu(leaf, &key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (key.objectid == bytenr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) (key.type == BTRFS_EXTENT_ITEM_KEY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) key.type == BTRFS_METADATA_ITEM_KEY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) ret = add_inline_refs(fs_info, path, bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) &info_level, &preftrees, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) ret = add_keyed_refs(fs_info, path, bytenr, info_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) &preftrees, sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) extent_item_pos, sc, ignore_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) * This walks the tree of merged and resolved refs. Tree blocks are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * read in as needed. Unique entries are added to the ulist, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * the list of found roots is updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * We release the entire tree in one go before returning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) node = rb_first_cached(&preftrees.direct.root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) ref = rb_entry(node, struct prelim_ref, rbnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) node = rb_next(&ref->rbnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * ref->count < 0 can happen here if there are delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * refs with a node->action of BTRFS_DROP_DELAYED_REF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * prelim_ref_insert() relies on this when merging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * identical refs to keep the overall count correct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * prelim_ref_insert() will merge only those refs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * which compare identically. Any refs having
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * e.g. different offsets would not be merged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * and would retain their original ref->count < 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (roots && ref->count && ref->root_id && ref->parent == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (sc && sc->root_objectid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) ref->root_id != sc->root_objectid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) ret = BACKREF_FOUND_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* no parent == root of tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (ref->count && ref->parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (extent_item_pos && !ref->inode_list &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) ref->level == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) eb = read_tree_block(fs_info, ref->parent, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ref->level, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (IS_ERR(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) ret = PTR_ERR(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) } else if (!extent_buffer_uptodate(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (!path->skip_locking) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) btrfs_tree_read_lock(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) btrfs_set_lock_blocking_read(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) ret = find_extent_in_eb(eb, bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) *extent_item_pos, &eie, ignore_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (!path->skip_locking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) btrfs_tree_read_unlock_blocking(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ref->inode_list = eie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) ret = ulist_add_merge_ptr(refs, ref->parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) ref->inode_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) (void **)&eie, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (!ret && extent_item_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * We've recorded that parent, so we must extend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * its inode list here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * However if there was corruption we may not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * have found an eie, return an error in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) ASSERT(eie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (!eie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) ret = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) while (eie->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) eie = eie->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) eie->next = ref->inode_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) eie = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) prelim_release(&preftrees.direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) prelim_release(&preftrees.indirect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) prelim_release(&preftrees.indirect_missing_keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) free_inode_elem_list(eie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) static void free_leaf_list(struct ulist *blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct ulist_node *node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) struct extent_inode_elem *eie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) struct ulist_iterator uiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) ULIST_ITER_INIT(&uiter);
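	/* Each node's aux, when set, is the head of an inode element list. */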
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) while ((node = ulist_next(blocks, &uiter))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (!node->aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) eie = unode_aux_to_inode_list(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) free_inode_elem_list(eie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) node->aux = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) ulist_free(blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
/*
 * Finds all leaves with a reference to the specified combination of bytenr and
 * offset. The leaves will be stored in the *leafs ulist; since each node's aux
 * field may carry a list of inode elements, the ulist must be freed with
 * free_leaf_list() rather than plain ulist_free().
 *
 * Returns 0 on success, <0 on error.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) struct btrfs_fs_info *fs_info, u64 bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) u64 time_seq, struct ulist **leafs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) const u64 *extent_item_pos, bool ignore_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) *leafs = ulist_alloc(GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (!*leafs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) *leafs, NULL, extent_item_pos, NULL, ignore_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (ret < 0 && ret != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) free_leaf_list(*leafs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * walk all backrefs for a given extent to find all roots that reference this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * extent. Walking a backref means finding all extents that reference this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * extent and in turn walk the backrefs of those, too. Naturally this is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * recursive process, but here it is implemented in an iterative fashion: We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * find all referencing extents for the extent in question and put them on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows new elements to be added
 * after the current one while we are iterating. The process stops when we
 * reach the end of the list. Found roots are added to the roots list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * returns 0 on success, < 0 on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct btrfs_fs_info *fs_info, u64 bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) u64 time_seq, struct ulist **roots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) bool ignore_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct ulist *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct ulist_node *node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct ulist_iterator uiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) tmp = ulist_alloc(GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) *roots = ulist_alloc(GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (!*roots) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ulist_free(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) ULIST_ITER_INIT(&uiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) tmp, *roots, NULL, NULL, ignore_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (ret < 0 && ret != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) ulist_free(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) ulist_free(*roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) *roots = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) node = ulist_next(tmp, &uiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) bytenr = node->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) ulist_free(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) struct btrfs_fs_info *fs_info, u64 bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) u64 time_seq, struct ulist **roots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) bool ignore_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
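	/*
	 * Without a transaction handle we search the commit roots, so hold
	 * commit_root_sem to keep them from changing underneath us.
	 */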
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (!trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) down_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) time_seq, roots, ignore_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (!trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) up_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * btrfs_check_shared - tell us whether an extent is shared
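 * @root:   root of the inode whose extent we are checking
 * @inum:   inode number of the file that owns the extent
 * @bytenr: logical start of the extent to check
 * @roots:  caller-provided ulist used as scratch space, released on return
 * @tmp:    caller-provided ulist used as scratch space, released on return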
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * btrfs_check_shared uses the backref walking code but will short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * circuit as soon as it finds a root or inode that doesn't match the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * one passed in. This provides a significant performance benefit for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * callers (such as fiemap) which want to know whether the extent is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * shared but do not need a ref count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * This attempts to attach to the running transaction in order to account for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * delayed refs, but continues on even when no running transaction exists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct ulist *roots, struct ulist *tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) struct btrfs_fs_info *fs_info = root->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct btrfs_trans_handle *trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct ulist_iterator uiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct ulist_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct seq_list elem = SEQ_LIST_INIT(elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct share_check shared = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) .root_objectid = root->root_key.objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) .inum = inum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) .share_count = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) ulist_init(roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) ulist_init(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) trans = btrfs_join_transaction_nostart(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) ret = PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
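		/*
		 * -ENOENT means no transaction is currently running and
		 * -EROFS means the fs is read-only; fall back to searching
		 * the commit roots in both cases.
		 */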
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) down_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) btrfs_get_tree_mod_seq(fs_info, &elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) ULIST_ITER_INIT(&uiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) roots, NULL, &shared, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (ret == BACKREF_FOUND_SHARED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) /* this is the only condition under which we return 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (ret < 0 && ret != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) break;
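		/* -ENOENT is tolerated here; clear it and keep walking. */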
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) node = ulist_next(tmp, &uiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) bytenr = node->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) shared.share_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (trans) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) btrfs_put_tree_mod_seq(fs_info, &elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) up_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ulist_release(roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ulist_release(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) u64 start_off, struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) struct btrfs_inode_extref **ret_extref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) u64 *found_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) int ret, slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) struct btrfs_inode_extref *extref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) const struct extent_buffer *leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) unsigned long ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) key.objectid = inode_objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) key.type = BTRFS_INODE_EXTREF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) key.offset = start_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) leaf = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next BTRFS_INODE_EXTREF_KEY item. In the
			 * case that we're pointing to the last slot in
			 * a leaf, we must move one leaf over.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) ret = btrfs_next_leaf(root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (ret >= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) btrfs_item_key_to_cpu(leaf, &found_key, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * Check that we're still looking at an extended ref key for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * this particular objectid. If we have different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * objectid or type then there are no more to be found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * in the tree and we can exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (found_key.objectid != inode_objectid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (found_key.type != BTRFS_INODE_EXTREF_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) extref = (struct btrfs_inode_extref *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) *ret_extref = extref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (found_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) *found_off = found_key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
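
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * walking all extended refs of an inode with btrfs_find_one_extref().  The
 * continuation idiom (restart the search at found_off + 1) is the same one
 * used by iterate_inode_extrefs() further down in this file.
 */
static int __maybe_unused example_walk_extrefs(struct btrfs_root *fs_root,
					       u64 inum)
{
	struct btrfs_path *path = btrfs_alloc_path();
	struct btrfs_inode_extref *extref;
	u64 offset = 0;
	int ret;

	if (!path)
		return -ENOMEM;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path,
					    &extref, &offset);
		if (ret) {
			/* -ENOENT just means there are no more extrefs */
			if (ret == -ENOENT)
				ret = 0;
			break;
		}
		/* extref points into path->nodes[0]; use it before release */
		btrfs_release_path(path);
		offset++;
	}
	btrfs_free_path(path);
	return ret;
}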
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
/*
 * This iterates to turn a name (from an iref/extref) into a full filesystem
 * path.  Elements of the path are separated by '/' and the path is
 * guaranteed to be 0-terminated.  The path is only given within the current
 * filesystem, therefore it never starts with a '/'.  The caller is
 * responsible for providing "size" bytes in "dest".  The dest buffer is
 * filled backwards; finally, the start point of the resulting string is
 * returned.  This pointer is within dest, normally.
 * In case the path buffer would overflow, the pointer is decremented
 * further as if output was written to the buffer, though no more output is
 * actually generated.  That way, the caller can determine how much space
 * would be required for the path to fit into the buffer.  In that case, the
 * returned value will be smaller than dest.  Callers must check this!
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) u32 name_len, unsigned long name_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) struct extent_buffer *eb_in, u64 parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) char *dest, u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) u64 next_inum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) s64 bytes_left = ((s64)size) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) struct extent_buffer *eb = eb_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) int leave_spinning = path->leave_spinning;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct btrfs_inode_ref *iref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (bytes_left >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) dest[bytes_left] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) path->leave_spinning = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) bytes_left -= name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (bytes_left >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) read_extent_buffer(eb, dest + bytes_left,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) name_off, name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (eb != eb_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (!path->skip_locking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) btrfs_tree_read_unlock_blocking(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) ret = btrfs_find_item(fs_root, path, parent, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) BTRFS_INODE_REF_KEY, &found_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) next_inum = found_key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) /* regular exit ahead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (parent == next_inum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) /* make sure we can use eb after releasing the path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (eb != eb_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (!path->skip_locking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) btrfs_set_lock_blocking_read(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) path->nodes[0] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) path->locks[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) name_len = btrfs_inode_ref_name_len(eb, iref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) name_off = (unsigned long)(iref + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) parent = next_inum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) --bytes_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (bytes_left >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) dest[bytes_left] = '/';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) path->leave_spinning = leave_spinning;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return dest + bytes_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
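
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a caller of btrfs_ref_to_path() detects truncation, in the same way
 * inode_to_path() below does.  name_len/name_off/eb/inum are assumed to
 * describe a valid inode ref.
 */
static int __maybe_unused example_ref_to_path(struct btrfs_root *fs_root,
					      struct btrfs_path *path,
					      u32 name_len,
					      unsigned long name_off,
					      struct extent_buffer *eb,
					      u64 inum)
{
	char buf[256];
	char *p;

	p = btrfs_ref_to_path(fs_root, path, name_len, name_off, eb, inum,
			      buf, sizeof(buf));
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (p < buf)
		/* buf was too small; (buf - p) more bytes were needed */
		return -ENAMETOOLONG;
	/* p points at the 0-terminated path, somewhere inside buf */
	return 0;
}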
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
/*
 * This makes the path point to (logical EXTENT_ITEM *).
 * On success it returns 0 and stores BTRFS_EXTENT_FLAG_DATA (for data) or
 * BTRFS_EXTENT_FLAG_TREE_BLOCK (for tree blocks) in *flags_ret; it returns
 * <0 on error.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) struct btrfs_path *path, struct btrfs_key *found_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) u64 *flags_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) u64 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) u64 size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) u32 item_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) const struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) struct btrfs_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) key.type = BTRFS_METADATA_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) key.type = BTRFS_EXTENT_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) key.objectid = logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) key.offset = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (found_key->type == BTRFS_METADATA_ITEM_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) size = fs_info->nodesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) size = found_key->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (found_key->objectid > logical ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) found_key->objectid + size <= logical) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) btrfs_debug(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) "logical %llu is not within any extent", logical);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) eb = path->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) item_size = btrfs_item_size_nr(eb, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) BUG_ON(item_size < sizeof(*ei));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) flags = btrfs_extent_flags(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) btrfs_debug(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) logical, logical - found_key->objectid, found_key->objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) found_key->offset, flags, item_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) WARN_ON(!flags_ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (flags_ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) else if (flags & BTRFS_EXTENT_FLAG_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) *flags_ret = BTRFS_EXTENT_FLAG_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) }
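
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * classifying a logical address with extent_from_logical(), mirroring the
 * flags check done by iterate_inodes_from_logical() below.  Returns 1 for
 * a tree block, 0 for data, <0 on error.
 */
static int __maybe_unused example_classify_logical(struct btrfs_fs_info *fs_info,
						   u64 logical,
						   struct btrfs_path *path)
{
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;

	return (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) ? 1 : 0;
}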
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
/*
 * Helper function to iterate extent inline refs.  ptr must point to a 0
 * value for the first call and may be modified; it is used to track state.
 * If more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref.  After the last ref was processed, 1 is returned.
 * Returns <0 on error.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) static int get_extent_inline_ref(unsigned long *ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) const struct extent_buffer *eb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) const struct btrfs_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) const struct btrfs_extent_item *ei,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) u32 item_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct btrfs_extent_inline_ref **out_eiref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) int *out_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) unsigned long end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) u64 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct btrfs_tree_block_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (!*ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) /* first call */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) flags = btrfs_extent_flags(eb, ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (key->type == BTRFS_METADATA_ITEM_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /* a skinny metadata extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) *out_eiref =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) (struct btrfs_extent_inline_ref *)(ei + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) info = (struct btrfs_tree_block_info *)(ei + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) *out_eiref =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) (struct btrfs_extent_inline_ref *)(info + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) *ptr = (unsigned long)*out_eiref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) end = (unsigned long)ei + item_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) BTRFS_REF_TYPE_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) if (*out_type == BTRFS_REF_TYPE_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) return -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) *ptr += btrfs_extent_inline_ref_size(*out_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) WARN_ON(*ptr > end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) if (*ptr == end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) return 1; /* last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
/*
 * Reads the tree block backref for an extent.  The tree level and root are
 * returned through out_level and out_root.  ptr must point to a 0 value for
 * the first call and may be modified (see the get_extent_inline_ref
 * comment).
 * Returns 0 if data was provided, 1 if there was no more data to provide,
 * or <0 on error.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) struct btrfs_key *key, struct btrfs_extent_item *ei,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) u32 item_size, u64 *out_root, u8 *out_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) struct btrfs_extent_inline_ref *eiref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) if (*ptr == (unsigned long)-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) &eiref, &type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (type == BTRFS_TREE_BLOCK_REF_KEY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) type == BTRFS_SHARED_BLOCK_REF_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) /* we can treat both ref types equally here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (key->type == BTRFS_EXTENT_ITEM_KEY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) struct btrfs_tree_block_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) info = (struct btrfs_tree_block_info *)(ei + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) *out_level = btrfs_tree_block_level(eb, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) *out_level = (u8)key->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) *ptr = (unsigned long)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) }
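
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * resolving every tree backref of a metadata extent, similar to the way
 * scrub's warning path consumes this helper.  eb, found_key, ei and
 * item_size are assumed to come from a prior extent_from_logical() lookup.
 */
static void __maybe_unused example_print_tree_backrefs(
					struct btrfs_fs_info *fs_info,
					struct extent_buffer *eb,
					struct btrfs_key *found_key,
					struct btrfs_extent_item *ei,
					u32 item_size)
{
	unsigned long ptr = 0;
	u64 ref_root;
	u8 ref_level;
	int ret;

	do {
		ret = tree_backref_for_extent(&ptr, eb, found_key, ei,
					      item_size, &ref_root,
					      &ref_level);
		if (ret == 0)
			btrfs_debug(fs_info,
				    "tree backref: root %llu level %d",
				    ref_root, ref_level);
	} while (ret == 0);
}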
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) struct extent_inode_elem *inode_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) u64 root, u64 extent_item_objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) iterate_extent_inodes_t *iterate, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) struct extent_inode_elem *eie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) for (eie = inode_list; eie; eie = eie->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) btrfs_debug(fs_info,
			"ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) extent_item_objectid, eie->inum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) eie->offset, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) ret = iterate(eie->inum, eie->offset, root, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) btrfs_debug(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) "stopping iteration for %llu due to ret=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) extent_item_objectid, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
/*
 * Calls iterate() for every inode that references the extent identified by
 * the given parameters.  Iteration stops when the iterator function returns
 * a non-zero value.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) u64 extent_item_objectid, u64 extent_item_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) int search_commit_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) iterate_extent_inodes_t *iterate, void *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) bool ignore_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) struct btrfs_trans_handle *trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) struct ulist *refs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) struct ulist *roots = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) struct ulist_node *ref_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) struct ulist_node *root_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) struct ulist_iterator ref_uiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) struct ulist_iterator root_uiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) btrfs_debug(fs_info, "resolving all inodes for extent %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) extent_item_objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (!search_commit_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) trans = btrfs_attach_transaction(fs_info->extent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (IS_ERR(trans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) if (PTR_ERR(trans) != -ENOENT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) PTR_ERR(trans) != -EROFS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) return PTR_ERR(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) trans = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) down_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) tree_mod_seq_elem.seq, &refs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) &extent_item_pos, ignore_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) ULIST_ITER_INIT(&ref_uiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) tree_mod_seq_elem.seq, &roots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) ignore_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) ULIST_ITER_INIT(&root_uiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) btrfs_debug(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) "root %llu references leaf %llu, data list %#llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) root_node->val, ref_node->val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) ref_node->aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) ret = iterate_leaf_refs(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) (struct extent_inode_elem *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) (uintptr_t)ref_node->aux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) root_node->val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) extent_item_objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) iterate, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) ulist_free(roots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) free_leaf_list(refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (trans) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) btrfs_end_transaction(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) up_read(&fs_info->commit_root_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
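
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal iterate_extent_inodes_t callback that just counts the
 * (inode, offset, root) triples it is handed.  Returning non-zero from the
 * callback stops the iteration early, as described above.
 */
static int __maybe_unused example_count_refs(u64 inum, u64 offset, u64 root,
					     void *ctx)
{
	u64 *count = ctx;

	(*count)++;
	return 0;	/* keep iterating */
}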
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) iterate_extent_inodes_t *iterate, void *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) bool ignore_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) u64 extent_item_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) u64 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) int search_commit_root = path->search_commit_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) extent_item_pos = logical - found_key.objectid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) ret = iterate_extent_inodes(fs_info, found_key.objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) extent_item_pos, search_commit_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) iterate, ctx, ignore_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
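
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * counting all inode references to the data extent at @logical, combining
 * iterate_inodes_from_logical() with the example callback above.
 */
static int __maybe_unused example_count_at_logical(
					struct btrfs_fs_info *fs_info,
					u64 logical, u64 *count)
{
	struct btrfs_path *path = btrfs_alloc_path();
	int ret;

	if (!path)
		return -ENOMEM;

	*count = 0;
	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  example_count_refs, count, false);
	btrfs_free_path(path);
	return ret;
}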
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) struct extent_buffer *eb, void *ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) iterate_irefs_t *iterate, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) u32 cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) u32 name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) u64 parent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct btrfs_item *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) struct btrfs_inode_ref *iref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) struct btrfs_key found_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) while (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) ret = btrfs_find_item(fs_root, path, inum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) &found_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) ret = found ? 0 : -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) ++found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) parent = found_key.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) eb = btrfs_clone_extent_buffer(path->nodes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (!eb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) item = btrfs_item_nr(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) name_len = btrfs_inode_ref_name_len(eb, iref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* path must be released before calling iterate()! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) btrfs_debug(fs_root->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) "following ref at offset %u for inode %llu in tree %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) cur, found_key.objectid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) fs_root->root_key.objectid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) ret = iterate(parent, name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) (unsigned long)(iref + 1), eb, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) len = sizeof(*iref) + name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) iref = (struct btrfs_inode_ref *)((char *)iref + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) struct btrfs_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) iterate_irefs_t *iterate, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) int slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) u64 offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) u64 parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) struct btrfs_inode_extref *extref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) u32 item_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) u32 cur_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) unsigned long ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) ret = found ? 0 : -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) ++found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) slot = path->slots[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) eb = btrfs_clone_extent_buffer(path->nodes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (!eb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) item_size = btrfs_item_size_nr(eb, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) ptr = btrfs_item_ptr_offset(eb, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) cur_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) while (cur_offset < item_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) u32 name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) parent = btrfs_inode_extref_parent(eb, extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) name_len = btrfs_inode_extref_name_len(eb, extref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) ret = iterate(parent, name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) (unsigned long)&extref->name, eb, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
			cur_offset += sizeof(*extref) + name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) free_extent_buffer(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) btrfs_release_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) struct btrfs_path *path, iterate_irefs_t *iterate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) int found_refs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) ++found_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) else if (ret != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) if (ret == -ENOENT && found_refs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
/*
 * Returns 0 if the path could be dumped (possibly truncated).
 * Returns <0 in case of an error.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) struct extent_buffer *eb, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) struct inode_fs_paths *ipath = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) char *fspath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) char *fspath_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) int i = ipath->fspath->elem_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) const int s_ptr = sizeof(char *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) u32 bytes_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) bytes_left = ipath->fspath->bytes_left > s_ptr ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) ipath->fspath->bytes_left - s_ptr : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) name_off, eb, inum, fspath_min, bytes_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (IS_ERR(fspath))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) return PTR_ERR(fspath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) if (fspath > fspath_min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) ipath->fspath->val[i] = (u64)(unsigned long)fspath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) ++ipath->fspath->elem_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) ipath->fspath->bytes_left = fspath - fspath_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) ++ipath->fspath->elem_missed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) ipath->fspath->bytes_missing += fspath_min - fspath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) ipath->fspath->bytes_left = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
/*
 * This dumps all filesystem paths to the inode into the ipath struct,
 * provided it has been created large enough.  Each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * When it returns, there are ipath->fspath->elem_cnt paths available in
 * ipath->fspath->val[].  When the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed;
 * otherwise it's zero.  ipath->fspath->bytes_missing holds the number of
 * bytes that would have been needed to return all paths.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) inode_to_path, ipath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) struct btrfs_data_container *init_data_container(u32 total_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) struct btrfs_data_container *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) size_t alloc_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) data = kvmalloc(alloc_bytes, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) if (total_bytes >= sizeof(*data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) data->bytes_left = total_bytes - sizeof(*data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) data->bytes_missing = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) data->bytes_missing = sizeof(*data) - total_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) data->bytes_left = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) data->elem_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) data->elem_missed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) }
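
/*
 * Worked example (editor's addition): assuming sizeof(*data) is 16, as on a
 * typical 64-bit build,
 *
 *	init_data_container(4096) -> bytes_left = 4080, bytes_missing = 0
 *	init_data_container(8)    -> bytes_left = 0,    bytes_missing = 8
 *
 * In the second case 16 bytes are still allocated, so the header itself is
 * always valid.
 */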
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
/*
 * Allocates space to return multiple filesystem paths for an inode.
 * total_bytes to allocate are passed; note that the space usable for actual
 * path information is total_bytes - sizeof(struct btrfs_data_container),
 * which is what init_data_container() reserves for the header.
 * The returned pointer must be freed with free_ipath() in the end.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) struct btrfs_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) struct inode_fs_paths *ifp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) struct btrfs_data_container *fspath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) fspath = init_data_container(total_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (IS_ERR(fspath))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) return ERR_CAST(fspath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (!ifp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) kvfree(fspath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) ifp->btrfs_path = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) ifp->fspath = fspath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) ifp->fs_root = fs_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) return ifp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) void free_ipath(struct inode_fs_paths *ipath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) if (!ipath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) kvfree(ipath->fspath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) kfree(ipath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
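
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the typical ipath life cycle as used by the INO_PATHS ioctl: allocate,
 * resolve, consume, free.  The 4096 byte budget is an arbitrary example
 * value; free_ipath() intentionally does not free the caller-owned
 * btrfs_path.
 */
static int __maybe_unused example_dump_paths(struct btrfs_root *fs_root,
					     u64 inum)
{
	struct btrfs_path *path = btrfs_alloc_path();
	struct inode_fs_paths *ipath;
	int ret;
	u32 i;

	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (ret)
		goto out;

	for (i = 0; i < ipath->fspath->elem_cnt; i++)
		btrfs_debug(fs_root->fs_info, "path %u: %s", i,
			    (char *)(unsigned long)ipath->fspath->val[i]);

	/* a caller would normally retry with a larger budget here */
	if (ipath->fspath->elem_missed)
		ret = -ENOSPC;
out:
	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}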
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) struct btrfs_backref_iter *btrfs_backref_iter_alloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) struct btrfs_backref_iter *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) ret = kzalloc(sizeof(*ret), gfp_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) ret->path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) if (!ret->path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) kfree(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) /* Current backref iterator only supports iteration in commit root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) ret->path->search_commit_root = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) ret->path->skip_locking = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) ret->fs_info = fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) struct btrfs_fs_info *fs_info = iter->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) struct btrfs_path *path = iter->path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) struct btrfs_extent_item *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) struct btrfs_key key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) key.objectid = bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) key.type = BTRFS_METADATA_ITEM_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) key.offset = (u64)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) iter->bytenr = bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) ret = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (path->slots[0] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) ret = -EUCLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) path->slots[0]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) memcpy(&iter->cur_key, &key, sizeof(key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) iter->end_ptr = (u32)(iter->item_ptr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) btrfs_item_size_nr(path->nodes[0], path->slots[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) struct btrfs_extent_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
	/*
	 * Only iteration of tree backrefs is supported for now.
	 *
	 * This is an extra precaution for non-skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, so the extent flags are
	 * the only way to determine whether this is a tree block.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) ret = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) /* If there is no inline backref, go search for keyed backref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) if (iter->cur_ptr >= iter->end_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) ret = btrfs_next_item(fs_info->extent_root, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) /* No inline nor keyed ref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) if (iter->cur_key.objectid != bytenr ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) path->slots[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) iter->item_ptr = iter->cur_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) path->nodes[0], path->slots[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) btrfs_backref_iter_release(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) }

/*
 * Go to the next backref item of the current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's an inline ref or not via iter->cur_key.
 *
 * Return 0 if we get the next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items or out of inline items, go to the next item */
	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
								 path->slots[0]);
	return 0;
}

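/*
 * A minimal usage sketch for the iterator above. This is a sketch only:
 * error handling is omitted, and btrfs_backref_iter_alloc()/_free() are
 * assumed to be the allocation helpers declared in backref.h:
 *
 *	struct btrfs_backref_iter *iter;
 *	int ret;
 *
 *	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0) {
 *		if (btrfs_backref_iter_is_inline_ref(iter))
 *			;	// Parse the inline ref at iter->cur_ptr
 *		else
 *			;	// Parse the keyed ref from iter->cur_key
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	btrfs_backref_iter_release(iter);
 *	btrfs_backref_iter_free(iter);
 *
 * A return value of >0 from the start/next calls ends a successful
 * iteration; <0 indicates an error.
 */
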
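/*
 * Initialize a freshly allocated backref cache: an empty rb root and empty
 * pending/changed/detached/leaves lists. @is_reloc records whether this
 * cache is used by relocation, as some backrefs are only meaningful for the
 * reloc backref cache.
 */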
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}

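/*
 * Allocate and initialize a backref node for @bytenr at @level, accounting
 * it in @cache->nr_nodes. Returns NULL on allocation failure. The node
 * starts out unlinked (empty list heads and a cleared rb_node).
 */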
struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
	cache->nr_nodes++;
	node->level = level;
	node->bytenr = bytenr;

	return node;
}

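/*
 * Allocate a backref edge, accounting it in @cache->nr_edges. The caller is
 * expected to link it between a lower and an upper node with
 * btrfs_backref_link_edge() and to release it with btrfs_backref_free_edge().
 */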
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

/*
 * Drop the backref node from cache, also cleaning up all its
 * upper edges and any uncached nodes in the path.
 *
 * This cleanup happens bottom up, thus the node should either
 * be the lowest node in the cache or a detached node.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Add the upper node to the leaf list if no other child
		 * block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}

/*
 * Release all nodes/edges from the current cache
 */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct btrfs_backref_node, lower);
		btrfs_backref_cleanup_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->pending_edge));
	ASSERT(list_empty(&cache->useless_node));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}

/*
 * Handle direct tree backref
 *
 * Direct tree backref means the backref item records its parent bytenr
 * directly. This is the case for SHARED_BLOCK_REF backrefs (keyed or
 * inlined).
 *
 * @ref_key:	The converted backref key.
 *		For keyed backrefs, it's the item key.
 *		For inlined backrefs, objectid is the bytenr,
 *		type is btrfs_inline_ref_type, offset is
 *		btrfs_inline_ref_offset.
 */
static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
				      struct btrfs_key *ref_key,
				      struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *upper;
	struct rb_node *rb_node;

	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);

	/* Only a reloc root uses a backref pointing to itself */
	if (ref_key->objectid == ref_key->offset) {
		struct btrfs_root *root;

		cur->is_reloc_root = 1;
		/* Only the reloc backref cache cares about a specific root */
		if (cache->is_reloc) {
			root = find_reloc_root(cache->fs_info, cur->bytenr);
			if (!root)
				return -ENOENT;
			cur->root = root;
		} else {
			/*
			 * For the general purpose backref cache, a reloc root
			 * node is useless.
			 */
			list_add(&cur->list, &cache->useless_node);
		}
		return 0;
	}

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
	if (!rb_node) {
		/* Parent node not yet cached */
		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
						 cur->level + 1);
		if (!upper) {
			btrfs_backref_free_edge(cache, edge);
			return -ENOMEM;
		}

		/*
		 * Backrefs for the upper level block aren't cached, add the
		 * block to the pending list
		 */
		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		/* Parent node already cached */
		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		ASSERT(upper->checked);
		INIT_LIST_HEAD(&edge->list[UPPER]);
	}
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	return 0;
}

/*
 * Handle indirect tree backref
 *
 * Indirect tree backref means we only know which tree the node belongs to.
 * We still need to do a tree search to find out its parents. This is the
 * case for TREE_BLOCK_REF backrefs (keyed or inlined).
 *
 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
 * @tree_key:	The first key of this tree block.
 * @path:	A clean (released) path, to avoid allocating a path every
 *		time the function gets called.
 */
static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
					struct btrfs_path *path,
					struct btrfs_key *ref_key,
					struct btrfs_key *tree_key,
					struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_node *lower;
	struct btrfs_backref_edge *edge;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct rb_node *rb_node;
	int level;
	bool need_check = true;
	int ret;

	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
	if (IS_ERR(root))
		return PTR_ERR(root);
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		cur->cowonly = 1;

	if (btrfs_root_level(&root->root_item) == cur->level) {
		/* Tree root */
		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
		/*
		 * For the reloc backref cache, we may ignore reloc roots. But
		 * for the general purpose backref cache, we can't rely on
		 * btrfs_should_ignore_reloc_root() as it may conflict with a
		 * currently running relocation and lead to a missing root.
		 *
		 * For the general purpose backref cache, reloc root detection
		 * relies completely on direct backrefs (key->offset is the
		 * parent bytenr), thus we only do such a check for the reloc
		 * cache.
		 */
		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
			btrfs_put_root(root);
			list_add(&cur->list, &cache->useless_node);
		} else {
			cur->root = root;
		}
		return 0;
	}

	level = cur->level + 1;

	/* Search the tree to find parent blocks referring to the block */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
	path->lowest_level = 0;
	if (ret < 0) {
		btrfs_put_root(root);
		return ret;
	}
	if (ret > 0 && path->slots[level] > 0)
		path->slots[level]--;

	eb = path->nodes[level];
	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
		btrfs_err(fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
			  cur->bytenr, level - 1, root->root_key.objectid,
			  tree_key->objectid, tree_key->type, tree_key->offset);
		btrfs_put_root(root);
		ret = -ENOENT;
		goto out;
	}
	lower = cur;

	/* Add all nodes and edges in the path */
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level]) {
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       lower->bytenr);
			/* Same as the previous btrfs_should_ignore_reloc_root() call */
			if (btrfs_should_ignore_reloc_root(root) &&
			    cache->is_reloc) {
				btrfs_put_root(root);
				list_add(&lower->list, &cache->useless_node);
			} else {
				lower->root = root;
			}
			break;
		}

		edge = btrfs_backref_alloc_edge(cache);
		if (!edge) {
			btrfs_put_root(root);
			ret = -ENOMEM;
			goto out;
		}

		eb = path->nodes[level];
		rb_node = rb_simple_search(&cache->rb_root, eb->start);
		if (!rb_node) {
			upper = btrfs_backref_alloc_node(cache, eb->start,
							 lower->level + 1);
			if (!upper) {
				btrfs_put_root(root);
				btrfs_backref_free_edge(cache, edge);
				ret = -ENOMEM;
				goto out;
			}
			upper->owner = btrfs_header_owner(eb);
			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
				upper->cowonly = 1;

			/*
			 * If we know the block isn't shared we can avoid
			 * checking its backrefs.
			 */
			if (btrfs_block_can_be_shared(root, eb))
				upper->checked = 0;
			else
				upper->checked = 1;

			/*
			 * Add the block to the pending list if we need to
			 * check its backrefs. We only do this once while
			 * walking up a tree as we will catch anything else
			 * later on.
			 */
			if (!upper->checked && need_check) {
				need_check = false;
				list_add_tail(&edge->list[UPPER],
					      &cache->pending_edge);
			} else {
				if (upper->checked)
					need_check = true;
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
		} else {
			upper = rb_entry(rb_node, struct btrfs_backref_node,
					 rb_node);
			ASSERT(upper->checked);
			INIT_LIST_HEAD(&edge->list[UPPER]);
			if (!upper->owner)
				upper->owner = btrfs_header_owner(eb);
		}
		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);

		if (rb_node) {
			btrfs_put_root(root);
			break;
		}
		lower = upper;
		upper = NULL;
	}
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * Add backref node @cur into @cache.
 *
 * NOTE: Even if the function returns 0, @cur is not yet cached as its upper
 * links aren't yet bi-directional. Use btrfs_backref_finish_upper_links()
 * to finish the linkage.
 *
 * @path:	Released path for indirect tree backref lookup
 * @iter:	Released backref iter for extent tree search
 * @node_key:	The first key of the tree block
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;
	/*
	 * We skip the first btrfs_tree_block_info, as we don't use the key
	 * stored in it, but fetch it from the tree block.
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref? This means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * The backref was added previously when processing a backref
		 * of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Add the upper level block to the pending list if we need
		 * to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Update key for inline backref */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * Parent node found and matches current inline ref, no need
		 * to rebuild this node for this inline ref
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
			continue;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			continue;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, the ref offset means
		 * the root objectid. We need to search the tree to get its
		 * parent bytenr.
		 */
		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
						   cur);
		if (ret < 0)
			goto out;
	}
	ret = 0;
	cur->checked = 1;
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node().
 *
 * Return 0 on success, <0 on error (a failed sanity check or a duplicate
 * bytenr in the cache).
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node into the cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth-first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node.
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
					struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added by the current build_backref_tree()
		 * call haven't been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only status should match between parent and child */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						    upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}

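/*
 * A minimal sketch of how the helpers above are typically combined. This
 * mirrors build_backref_tree() in relocation.c; error handling and the walk
 * over cache->pending_edge are omitted:
 *
 *	node = btrfs_backref_alloc_node(cache, bytenr, level);
 *	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);
 *	// ... repeat add_tree_node for each edge queued on
 *	// cache->pending_edge ...
 *	ret = btrfs_backref_finish_upper_links(cache, node);
 *	if (ret < 0)
 *		btrfs_backref_error_cleanup(cache, node);
 */
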
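/*
 * Clean up the cache after an error while building a backref tree: release
 * everything still queued on useless_node and pending_edge, drop nodes that
 * became orphaned in the process, then clean up @node itself.
 */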
void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
					struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache, so we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this node's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		if (lower == node)
			node = NULL;
		btrfs_backref_drop_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
}