// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Facebook. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "delayed-ref.h"
#include "ref-verify.h"

/*
 * Used to keep track of the roots and number of refs each root has for a given
 * bytenr. This just tracks the number of direct references, no shared
 * references.
 */
struct root_entry {
	u64 root_objectid;
	u64 num_refs;
	struct rb_node node;
};

/*
 * These are meant to represent what should exist in the extent tree; they can
 * be used to verify that the extent tree is consistent, as these should all
 * match what the extent tree says.
 */
struct ref_entry {
	u64 root_objectid;
	u64 parent;
	u64 owner;
	u64 offset;
	u64 num_refs;
	struct rb_node node;
};

#define MAX_TRACE 16

/*
 * Whenever we add/remove a reference we record the action. The action maps
 * back to the delayed ref action. We hold the ref we are changing in the
 * action so we can account for the history properly, and we record the root we
 * were called with since it could be different from ref_root. We also store
 * stack traces because that's how I roll.
 */
struct ref_action {
	int action;
	u64 root;
	struct ref_entry ref;
	struct list_head list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
};

/*
 * One of these for every block we reference; it holds the roots and references
 * to it as well as all of the ref actions that have occurred to it. We never
 * free it until we unmount the file system in order to make sure
 * re-allocations are happening properly.
 */
struct block_entry {
	u64 bytenr;
	u64 len;
	u64 num_refs;
	int metadata;
	int from_disk;
	struct rb_root roots;
	struct rb_root refs;
	struct rb_node node;
	struct list_head actions;
};

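/*
 * All of this state hangs off fs_info->block_tree, protected by
 * fs_info->ref_verify_lock:
 *
 *   block_tree -> block_entry (keyed by bytenr)
 *                   -> be->roots:   one root_entry per referencing root
 *                   -> be->refs:    one ref_entry per distinct ref
 *                   -> be->actions: list of every ref_action recorded
 *
 * Insert a block_entry, ordered by bytenr.  Returns the existing entry if one
 * is already present for this bytenr, otherwise links the new node in and
 * returns NULL.
 */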
static struct block_entry *insert_block_entry(struct rb_root *root,
					      struct block_entry *be)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct block_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct block_entry, node);
		if (entry->bytenr > be->bytenr)
			p = &(*p)->rb_left;
		else if (entry->bytenr < be->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&be->node, parent_node, p);
	rb_insert_color(&be->node, root);
	return NULL;
}

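/* Find the block_entry for a given bytenr, or NULL if none is tracked. */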
static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n;
	struct block_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < bytenr)
			n = n->rb_right;
		else if (entry->bytenr > bytenr)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}

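/*
 * Insert a root_entry into a block's roots rbtree, ordered by root_objectid.
 * Returns the existing entry on collision, otherwise NULL.
 */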
static struct root_entry *insert_root_entry(struct rb_root *root,
					    struct root_entry *re)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct root_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct root_entry, node);
		if (entry->root_objectid > re->root_objectid)
			p = &(*p)->rb_left;
		else if (entry->root_objectid < re->root_objectid)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&re->node, parent_node, p);
	rb_insert_color(&re->node, root);
	return NULL;
}

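/*
 * Total ordering for ref_entry: compare root_objectid, then parent, then
 * owner, then offset.  Returns <0, 0 or >0 like a classic comparator.
 */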
static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
{
	if (ref1->root_objectid < ref2->root_objectid)
		return -1;
	if (ref1->root_objectid > ref2->root_objectid)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;
	if (ref1->owner < ref2->owner)
		return -1;
	if (ref1->owner > ref2->owner)
		return 1;
	if (ref1->offset < ref2->offset)
		return -1;
	if (ref1->offset > ref2->offset)
		return 1;
	return 0;
}

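/*
 * Insert a ref_entry into a block's refs rbtree using comp_refs() for the
 * ordering.  Returns the existing entry on collision, otherwise NULL.
 */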
static struct ref_entry *insert_ref_entry(struct rb_root *root,
					  struct ref_entry *ref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct ref_entry *entry;
	int cmp;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct ref_entry, node);
		cmp = comp_refs(entry, ref);
		if (cmp > 0)
			p = &(*p)->rb_left;
		else if (cmp < 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&ref->node, parent_node, p);
	rb_insert_color(&ref->node, root);
	return NULL;
}

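/* Find the root_entry for a given root objectid, or NULL if none exists. */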
static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
{
	struct rb_node *n;
	struct root_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct root_entry, node);
		if (entry->root_objectid < objectid)
			n = n->rb_right;
		else if (entry->root_objectid > objectid)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}

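/*
 * Stack trace helpers.  With CONFIG_STACKTRACE we record where each ref
 * action came from (skipping the two innermost frames) so a failed sanity
 * check can print the full history; without it they are no-ops.
 */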
#ifdef CONFIG_STACKTRACE
static void __save_stack_trace(struct ref_action *ra)
{
	ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
}

static void __print_stack_trace(struct btrfs_fs_info *fs_info,
				struct ref_action *ra)
{
	if (ra->trace_len == 0) {
		btrfs_err(fs_info, " ref-verify: no stacktrace");
		return;
	}
	stack_trace_print(ra->trace, ra->trace_len, 2);
}
#else
static inline void __save_stack_trace(struct ref_action *ra)
{
}

static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
				       struct ref_action *ra)
{
	btrfs_err(fs_info, " ref-verify: no stacktrace support");
}
#endif

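/*
 * Tear down a block_entry: free every root_entry and ref_entry hanging off
 * its rbtrees, free the queued ref_actions, then the entry itself.
 */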
static void free_block_entry(struct block_entry *be)
{
	struct root_entry *re;
	struct ref_entry *ref;
	struct ref_action *ra;
	struct rb_node *n;

	while ((n = rb_first(&be->roots))) {
		re = rb_entry(n, struct root_entry, node);
		rb_erase(&re->node, &be->roots);
		kfree(re);
	}

	while ((n = rb_first(&be->refs))) {
		ref = rb_entry(n, struct ref_entry, node);
		rb_erase(&ref->node, &be->refs);
		kfree(ref);
	}

	while (!list_empty(&be->actions)) {
		ra = list_first_entry(&be->actions, struct ref_action,
				      list);
		list_del(&ra->list);
		kfree(ra);
	}
	kfree(be);
}

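/*
 * Allocate (or find) the block_entry for a bytenr and attach a root_entry
 * for root_objectid if one was given.  On success this returns with
 * fs_info->ref_verify_lock held; the caller is responsible for dropping it.
 */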
static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
					   u64 bytenr, u64 len,
					   u64 root_objectid)
{
	struct block_entry *be = NULL, *exist;
	struct root_entry *re = NULL;

	re = kzalloc(sizeof(struct root_entry), GFP_KERNEL);
	be = kzalloc(sizeof(struct block_entry), GFP_KERNEL);
	if (!be || !re) {
		kfree(re);
		kfree(be);
		return ERR_PTR(-ENOMEM);
	}
	be->bytenr = bytenr;
	be->len = len;

	re->root_objectid = root_objectid;
	re->num_refs = 0;

	spin_lock(&fs_info->ref_verify_lock);
	exist = insert_block_entry(&fs_info->block_tree, be);
	if (exist) {
		if (root_objectid) {
			struct root_entry *exist_re;

			exist_re = insert_root_entry(&exist->roots, re);
			if (exist_re)
				kfree(re);
		} else {
			kfree(re);
		}
		kfree(be);
		return exist;
	}

	be->num_refs = 0;
	be->metadata = 0;
	be->from_disk = 0;
	be->roots = RB_ROOT;
	be->refs = RB_ROOT;
	INIT_LIST_HEAD(&be->actions);
	if (root_objectid)
		insert_root_entry(&be->roots, re);
	else
		kfree(re);
	return be;
}

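/*
 * Record a tree block found while scanning the extent tree at mount time.
 * Shared (parent != 0) refs are keyed by parent only, normal refs by the
 * owning root; the block's level is stored in ref->owner.
 */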
static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
			  u64 parent, u64 bytenr, int level)
{
	struct block_entry *be;
	struct root_entry *re;
	struct ref_entry *ref = NULL, *exist;

	ref = kmalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref->root_objectid = 0;
	else
		ref->root_objectid = ref_root;
	ref->parent = parent;
	ref->owner = level;
	ref->offset = 0;
	ref->num_refs = 1;

	be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs++;
	be->from_disk = 1;
	be->metadata = 1;

	if (!parent) {
		ASSERT(ref_root);
		re = lookup_root_entry(&be->roots, ref_root);
		ASSERT(re);
		re->num_refs++;
	}
	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		exist->num_refs++;
		kfree(ref);
	}
	spin_unlock(&fs_info->ref_verify_lock);

	return 0;
}

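/*
 * Record a SHARED_DATA_REF found on disk: a data extent referenced via its
 * parent tree block rather than a (root, objectid, offset) triple.
 */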
static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
			       u64 parent, u32 num_refs, u64 bytenr,
			       u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;

	ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, 0);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = parent;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing shared ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}

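/*
 * Record an EXTENT_DATA_REF found on disk: a data extent referenced by
 * (root, inode objectid, offset), possibly with a count greater than one.
 */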
static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *leaf,
			       struct btrfs_extent_data_ref *dref,
			       u64 bytenr, u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;
	struct root_entry *re;
	u64 ref_root = btrfs_extent_data_ref_root(leaf, dref);
	u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
	u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
	u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);

	ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = 0;
	ref->owner = owner;
	ref->root_objectid = ref_root;
	ref->offset = offset;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}

	re = lookup_root_entry(&be->roots, ref_root);
	if (!re) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "missing root in new block entry?");
		return -EINVAL;
	}
	re->num_refs += num_refs;
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}

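/*
 * Walk all the inline refs of one EXTENT_ITEM/METADATA_ITEM and add each of
 * them to the tracking trees.  For tree blocks this also picks up the block
 * level, either from the tree_block_info or the METADATA_ITEM key offset.
 */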
static int process_extent_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_path *path, struct btrfs_key *key,
			       int slot, int *tree_block_level)
{
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	struct extent_buffer *leaf = path->nodes[0];
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	unsigned long end, ptr;
	u64 offset, flags, count;
	int type;
	/* Initialize so we don't return garbage if there are no inline refs. */
	int ret = 0;

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	if ((key->type == BTRFS_EXTENT_ITEM_KEY) &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*tree_block_level = btrfs_tree_block_level(leaf, info);
		iref = (struct btrfs_extent_inline_ref *)(info + 1);
	} else {
		if (key->type == BTRFS_METADATA_ITEM_KEY)
			*tree_block_level = key->offset;
		iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	}

	ptr = (unsigned long)iref;
	end = (unsigned long)ei + item_size;
	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);
		switch (type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, offset, 0, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, offset, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = add_extent_data_ref(fs_info, leaf, dref,
						  key->objectid, key->offset);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, offset, count,
						  key->objectid, key->offset);
			break;
		default:
			btrfs_err(fs_info, "invalid key type in iref");
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
		ptr += btrfs_extent_inline_ref_size(type);
	}
	return ret;
}

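/*
 * Scan a whole extent tree leaf, remembering the current extent's bytenr and
 * size so that standalone ref items that follow the EXTENT_ITEM can be
 * attributed to it.
 */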
static int process_leaf(struct btrfs_root *root,
			struct btrfs_path *path, u64 *bytenr, u64 *num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	u32 count;
	int i = 0, tree_block_level = 0, ret = 0;
	struct btrfs_key key;
	int nritems = btrfs_header_nritems(leaf);

	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_EXTENT_ITEM_KEY:
			*num_bytes = key.offset;
			fallthrough;
		case BTRFS_METADATA_ITEM_KEY:
			*bytenr = key.objectid;
			ret = process_extent_item(fs_info, path, &key, i,
						  &tree_block_level);
			break;
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, key.offset, 0,
					     key.objectid, tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, key.offset,
					     key.objectid, tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = btrfs_item_ptr(leaf, i,
					      struct btrfs_extent_data_ref);
			ret = add_extent_data_ref(fs_info, leaf, dref, *bytenr,
						  *num_bytes);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = btrfs_item_ptr(leaf, i,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, key.offset, count,
						  *bytenr, *num_bytes);
			break;
		default:
			break;
		}
		if (ret)
			break;
	}
	return ret;
}

/* Walk down to the leaf from the given level */
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
			  int level, u64 *bytenr, u64 *num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *eb;
	u64 block_bytenr, gen;
	int ret = 0;

	while (level >= 0) {
		if (level) {
			struct btrfs_key first_key;

			block_bytenr = btrfs_node_blockptr(path->nodes[level],
							   path->slots[level]);
			gen = btrfs_node_ptr_generation(path->nodes[level],
							path->slots[level]);
			btrfs_node_key_to_cpu(path->nodes[level], &first_key,
					      path->slots[level]);
			eb = read_tree_block(fs_info, block_bytenr, gen,
					     level - 1, &first_key);
			if (IS_ERR(eb))
				return PTR_ERR(eb);
			if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				return -EIO;
			}
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_read(eb);
			path->nodes[level - 1] = eb;
			path->slots[level - 1] = 0;
			path->locks[level - 1] = BTRFS_READ_LOCK_BLOCKING;
		} else {
			ret = process_leaf(root, path, bytenr, num_bytes);
			if (ret)
				break;
		}
		level--;
	}
	return ret;
}

/* Walk up to the next node that needs to be processed */
static int walk_up_tree(struct btrfs_path *path, int *level)
{
	int l;

	for (l = 0; l < BTRFS_MAX_LEVEL; l++) {
		if (!path->nodes[l])
			continue;
		if (l) {
			path->slots[l]++;
			if (path->slots[l] <
			    btrfs_header_nritems(path->nodes[l])) {
				*level = l;
				return 0;
			}
		}
		btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);
		free_extent_buffer(path->nodes[l]);
		path->nodes[l] = NULL;
		path->slots[l] = 0;
		path->locks[l] = 0;
	}

	return 1;
}

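/* Print one recorded ref action plus the stack trace it was saved with. */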
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) static void dump_ref_action(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) struct ref_action *ra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) " Ref action %d, root %llu, ref_root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) ra->ref.owner, ra->ref.offset, ra->ref.num_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) __print_stack_trace(fs_info, ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) * Dumps all the information from the block entry to printk, it's going to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * awesome.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) static void dump_block_entry(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) struct block_entry *be)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) struct ref_entry *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) struct root_entry *re;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) struct ref_action *ra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) "dumping block entry [%llu %llu], num_refs %llu, metadata %d, from disk %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) be->bytenr, be->len, be->num_refs, be->metadata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) be->from_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) for (n = rb_first(&be->refs); n; n = rb_next(n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) ref = rb_entry(n, struct ref_entry, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) " ref root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) ref->root_objectid, ref->parent, ref->owner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) ref->offset, ref->num_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) for (n = rb_first(&be->roots); n; n = rb_next(n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) re = rb_entry(n, struct root_entry, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) btrfs_err(fs_info, " root entry %llu, num_refs %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) re->root_objectid, re->num_refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) list_for_each_entry(ra, &be->actions, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) dump_ref_action(fs_info, ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * btrfs_ref_tree_mod: called when we modify a ref for a bytenr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) * This will add an action item to the given bytenr and do sanity checks to make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * sure we haven't messed something up. If we are making a new allocation and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) * this block entry has history we will delete all previous actions as long as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) * our sanity checks pass as they are no longer needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) struct btrfs_ref *generic_ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) struct ref_entry *ref = NULL, *exist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct ref_action *ra = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct block_entry *be = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) struct root_entry *re = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) int action = generic_ref->action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) bool metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) u64 bytenr = generic_ref->bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) u64 num_bytes = generic_ref->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) u64 parent = generic_ref->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) u64 ref_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) u64 owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) u64 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) if (!btrfs_test_opt(fs_info, REF_VERIFY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) if (generic_ref->type == BTRFS_REF_METADATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) ref_root = generic_ref->tree_ref.root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) owner = generic_ref->tree_ref.level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) ref_root = generic_ref->data_ref.ref_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) owner = generic_ref->data_ref.ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) offset = generic_ref->data_ref.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (!ra || !ref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) ref->parent = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) ref->root_objectid = ref_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) ref->owner = owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) ref->offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) memcpy(&ra->ref, ref, sizeof(struct ref_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * Save the extra info from the delayed ref in the ref action to make it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * easier to figure out what is happening. The real ref's we add to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * ref tree need to reflect what we save on disk so it matches any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * on-disk refs we pre-loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) ra->ref.owner = owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ra->ref.offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) ra->ref.root_objectid = ref_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) __save_stack_trace(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) INIT_LIST_HEAD(&ra->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) ra->action = action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) ra->root = generic_ref->real_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * This is an allocation, preallocate the block_entry in case we haven't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * used it before.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (action == BTRFS_ADD_DELAYED_EXTENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * For subvol_create we'll just pass in whatever the parent root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * is and the new root objectid, so let's not treat the passed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * in root as if it really has a ref for this bytenr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (IS_ERR(be)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ret = PTR_ERR(be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) be->num_refs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (metadata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) be->metadata = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (be->num_refs != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) "re-allocated a block that still has references to it!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) dump_block_entry(fs_info, be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) dump_ref_action(fs_info, ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) while (!list_empty(&be->actions)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct ref_action *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) tmp = list_first_entry(&be->actions, struct ref_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) list_del(&tmp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) struct root_entry *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (!parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (!re) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * This is the root that is modifying us, so it's the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * one we want to lookup below when we modify the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * re->num_refs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ref_root = generic_ref->real_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) re->root_objectid = generic_ref->real_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) re->num_refs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) spin_lock(&fs_info->ref_verify_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) be = lookup_block_entry(&fs_info->block_tree, bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (!be) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) "trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) action, (unsigned long long)bytenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) (unsigned long long)num_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) dump_ref_action(fs_info, ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) } else if (be->num_refs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) "trying to do action %d for a bytenr that has 0 total references",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) dump_block_entry(fs_info, be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) dump_ref_action(fs_info, ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (!parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) tmp = insert_root_entry(&be->roots, re);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) kfree(re);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) re = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
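^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * Likewise insert_ref_entry() hands back an already-existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * matching ref instead of inserting, so "exist" means this exact
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * ref (root/parent/owner/offset) is already recorded for the block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) */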
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) exist = insert_ref_entry(&be->refs, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (exist) {
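^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * Dropping: decrement the duplicate count and remove the entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * once it hits zero. Adding: a data extent (!be->metadata) may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * legally hold several identical refs, so just bump the count; a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * tree block can only be referenced once per root, so a duplicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * add there is a bug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) */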
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (action == BTRFS_DROP_DELAYED_REF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (exist->num_refs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) "dropping a ref for an existing root that doesn't have a ref on the block");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) dump_block_entry(fs_info, be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) dump_ref_action(fs_info, ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) exist->num_refs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (exist->num_refs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) rb_erase(&exist->node, &be->refs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) kfree(exist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) } else if (!be->metadata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) exist->num_refs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) "attempting to add another ref for an existing ref on a tree block");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) dump_block_entry(fs_info, be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) dump_ref_action(fs_info, ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (action == BTRFS_DROP_DELAYED_REF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) "dropping a ref for a root that doesn't have a ref on the block");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) dump_block_entry(fs_info, be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) dump_ref_action(fs_info, ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!parent && !re) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) re = lookup_root_entry(&be->roots, ref_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (!re) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * This shouldn't happen because we will add our re
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * above when we look up the be with !parent, but just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * in case catch it here so we don't panic on some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * other corner case that wasn't thought of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) btrfs_err(fs_info, "failed to find root %llu for %llu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) generic_ref->real_root, be->bytenr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) dump_block_entry(fs_info, be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dump_ref_action(fs_info, ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
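^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * Finally mirror the delayed ref action in our accounting: the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * per-root count (when the ref is rooted rather than shared via a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * parent) and the total count for the block move together.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) */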
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (action == BTRFS_DROP_DELAYED_REF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (re)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) re->num_refs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) be->num_refs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) } else if (action == BTRFS_ADD_DELAYED_REF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) be->num_refs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (re)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) re->num_refs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
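^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) /* Keep the action in the block's history for later dumps. */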
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) list_add_tail(&ra->list, &be->actions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) spin_unlock(&fs_info->ref_verify_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* Free up the ref cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct block_entry *be;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (!btrfs_test_opt(fs_info, REF_VERIFY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) spin_lock(&fs_info->ref_verify_lock);
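^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * Tear down every block entry; cond_resched_lock() drops the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * spinlock periodically so freeing a huge tree doesn't hog the CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) */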
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) while ((n = rb_first(&fs_info->block_tree))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) be = rb_entry(n, struct block_entry, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) rb_erase(&be->node, &fs_info->block_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) free_block_entry(be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) cond_resched_lock(&fs_info->ref_verify_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) spin_unlock(&fs_info->ref_verify_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct block_entry *be = NULL, *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct rb_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (!btrfs_test_opt(fs_info, REF_VERIFY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) spin_lock(&fs_info->ref_verify_lock);
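^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * Standard rbtree descent by bytenr, but remember the closest entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * we pass so we still have a starting point when there is no exact
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * match for start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) */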
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) n = fs_info->block_tree.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) while (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) entry = rb_entry(n, struct block_entry, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (entry->bytenr < start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) n = n->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) } else if (entry->bytenr > start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) n = n->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) be = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* We want to get as close to start as possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (be == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) (entry->bytenr < start && be->bytenr > start) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) (entry->bytenr < start && entry->bytenr > be->bytenr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) be = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * We could have an empty block group; maybe we should add a check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * here to verify that the group really was empty?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!be) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) spin_unlock(&fs_info->ref_verify_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
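^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * Walk forward in bytenr order from the closest entry, freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * every block entry inside [start, start + len) and complaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * about any entry that straddles the block group boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) */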
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) n = &be->node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) while (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) be = rb_entry(n, struct block_entry, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) n = rb_next(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (be->bytenr < start && be->bytenr + be->len > start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) "block entry overlaps a block group [%llu,%llu]!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) start, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) dump_block_entry(fs_info, be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (be->bytenr < start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (be->bytenr >= start + len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (be->bytenr + be->len > start + len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) btrfs_err(fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) "block entry overlaps a block group [%llu,%llu]!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) start, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) dump_block_entry(fs_info, be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) rb_erase(&be->node, &fs_info->block_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) free_block_entry(be);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) spin_unlock(&fs_info->ref_verify_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* Walk down all roots and build the ref tree, meant to be called at mount */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct btrfs_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct extent_buffer *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) u64 bytenr = 0, num_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) int ret, level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (!btrfs_test_opt(fs_info, REF_VERIFY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) path = btrfs_alloc_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
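^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * Read-lock the extent tree root and prime the path so the walk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) * starts from the first slot of the top level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) */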
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) eb = btrfs_read_lock_root_node(fs_info->extent_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) btrfs_set_lock_blocking_read(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) level = btrfs_header_level(eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) path->nodes[level] = eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) path->slots[level] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * We have to keep track of the bytenr/num_bytes we last hit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * because we could have run out of space for an inline ref, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * would have had to add a ref key item which may appear on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * different leaf from the original extent item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ret = walk_down_tree(fs_info->extent_root, path, level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) &bytenr, &num_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ret = walk_up_tree(path, &level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
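^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300) /* On any failure disable ref verification and drop the partial cache. */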
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) btrfs_free_ref_cache(fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) btrfs_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }