/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#ifndef BTRFS_DELAYED_REF_H
#define BTRFS_DELAYED_REF_H

#include <linux/refcount.h>

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

struct btrfs_delayed_ref_node {
	struct rb_node ref_node;
	/*
	 * If action is BTRFS_ADD_DELAYED_REF, also link this node to
	 * ref_head->ref_add_list, then we do not need to iterate the
	 * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
	 */
	struct list_head add_list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	refcount_t refs;

	/*
	 * How many refs this entry is adding or deleting. For head refs,
	 * this may be a negative number because it keeps track of the total
	 * mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	/* is this a head ref? */
	unsigned int is_head:1;
	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};
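
/*
 * Illustrative sketch, not a real call site: nodes queued with
 * BTRFS_ADD_DELAYED_REF are also linked on the head's ref_add_list, so
 * they can be walked directly instead of filtering the whole rbtree.
 * Assumes head->lock is held; process_add_ref() is a made-up helper.
 *
 *	struct btrfs_delayed_ref_node *ref;
 *
 *	list_for_each_entry(ref, &head->ref_add_list, add_list)
 *		process_add_ref(ref);
 */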

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u8 level;
	bool update_key;
	bool update_flags;
	bool is_data;
	u64 flags_to_set;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent. They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	u64 bytenr;
	u64 num_bytes;
	refcount_t refs;
	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	spinlock_t lock;
	struct rb_root_cached ref_tree;
	/* Accumulate BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
	struct list_head ref_add_list;

	struct rb_node href_node;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs associated
	 * with this head ref. It is not adjusted as delayed refs are run, and
	 * is meant to track whether we need to do the csum accounting or not.
	 */
	int total_ref_mod;

	/*
	 * This is the current count of outstanding modifications for this
	 * bytenr. It is used with lookup_extent_info to get an accurate
	 * reference count for a bytenr, and is adjusted as delayed refs are
	 * run so that the on disk reference count + ref_mod stays accurate.
	 */
	int ref_mod;

	/*
	 * When a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed. must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree. In this case
	 * we need to update the in-memory accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int is_system:1;
	unsigned int processing:1;
};
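
/*
 * Worked example for the ref_mod fields above (the numbers are made up
 * for illustration): if the extent item on disk records 2 references and
 * one ADD (+1) plus three DROPs (-3) are queued against this head, then
 * ref_mod starts at -2, so on disk count + ref_mod = 0 is what a
 * lookup_extent_info style check would see. total_ref_mod keeps that
 * same -2 even while the individual refs are being run.
 */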

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root_cached href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/*
	 * how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;

	/*
	 * Make qgroup skip the given root.
	 *
	 * This is for snapshot creation: btrfs_qgroup_inherit() will manually
	 * modify the counters for the snapshot and its source, so the
	 * snapshot must be skipped in new_root/old_roots or it will get
	 * counted twice.
	 */
	u64 qgroup_to_skip;
};

enum btrfs_ref_type {
	BTRFS_REF_NOT_SET,
	BTRFS_REF_DATA,
	BTRFS_REF_METADATA,
	BTRFS_REF_LAST,
};

struct btrfs_data_ref {
	/* For EXTENT_DATA_REF */

	/* Root which refers to this data extent */
	u64 ref_root;

	/* Inode which refers to this data extent */
	u64 ino;

	/*
	 * file_offset - extent_offset
	 *
	 * file_offset is the key.offset of the EXTENT_DATA key.
	 * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
	 */
	u64 offset;
};
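
/*
 * Worked example for btrfs_data_ref::offset (made-up numbers): if a file
 * extent item sits at key.offset 1M (file_offset) and its
 * btrfs_file_extent_offset() is 4K (extent_offset), then
 * offset = 1048576 - 4096 = 1044480, i.e. where the full extent would
 * start if the file referenced it from its beginning.
 */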

struct btrfs_tree_ref {
	/*
	 * Level of this tree block
	 *
	 * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
	 */
	int level;

	/*
	 * Root which refers to this tree block.
	 *
	 * For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
	 */
	u64 root;

	/* For non-skinny metadata, no special member needed */
};

struct btrfs_ref {
	enum btrfs_ref_type type;
	int action;

	/*
	 * Whether this extent should go through qgroup accounting.
	 *
	 * Normally false, but for certain cases like delayed subtree scan,
	 * setting this flag can hugely reduce qgroup overhead.
	 */
	bool skip_qgroup;

	/*
	 * Optional. The root this modification is for, mostly used for
	 * qgroup optimization.
	 *
	 * When unset, the data/tree ref init code will populate it.
	 * In certain cases we're modifying a reference for a different root,
	 * e.g. when COWing fs tree blocks for balance: tree_ref::root will be
	 * the fs tree, but the work is done for the reloc tree, so @real_root
	 * should be set to the reloc tree.
	 */
	u64 real_root;
	u64 bytenr;
	u64 len;

	/* Bytenr of the parent tree block */
	u64 parent;
	union {
		struct btrfs_data_ref data_ref;
		struct btrfs_tree_ref tree_ref;
	};
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);

static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
					  int action, u64 bytenr, u64 len,
					  u64 parent)
{
	generic_ref->action = action;
	generic_ref->bytenr = bytenr;
	generic_ref->len = len;
	generic_ref->parent = parent;
}

static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref,
				       int level, u64 root)
{
	/* If @real_root not set, use @root as fallback */
	if (!generic_ref->real_root)
		generic_ref->real_root = root;
	generic_ref->tree_ref.level = level;
	generic_ref->tree_ref.root = root;
	generic_ref->type = BTRFS_REF_METADATA;
}

static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
				       u64 ref_root, u64 ino, u64 offset)
{
	/* If @real_root not set, use @ref_root as fallback */
	if (!generic_ref->real_root)
		generic_ref->real_root = ref_root;
	generic_ref->data_ref.ref_root = ref_root;
	generic_ref->data_ref.ino = ino;
	generic_ref->data_ref.offset = offset;
	generic_ref->type = BTRFS_REF_DATA;
}
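
/*
 * Illustrative usage sketch, not a real call site: queueing a delayed
 * metadata ref for a tree block. bytenr, level, trans and root are
 * placeholders for whatever the caller has at hand.
 *
 *	struct btrfs_ref ref = { 0 };
 *	int ret;
 *
 *	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
 *			       fs_info->nodesize, 0);
 *	btrfs_init_tree_ref(&ref, level, root->root_key.objectid);
 *	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
 */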

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(refcount_read(&ref->refs) == 0);
	if (refcount_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

static inline u64 btrfs_ref_head_to_space_flags(
				struct btrfs_delayed_ref_head *head_ref)
{
	if (head_ref->is_data)
		return BTRFS_BLOCK_GROUP_DATA;
	else if (head_ref->is_system)
		return BTRFS_BLOCK_GROUP_SYSTEM;
	return BTRFS_BLOCK_GROUP_METADATA;
}

static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
{
	if (refcount_dec_and_test(&head->refs))
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}

int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}
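
/*
 * Typical pattern when processing heads, as a rough sketch (the exact
 * error handling in the real callers differs; this assumes
 * btrfs_delayed_ref_lock() returns 0 on success):
 *
 *	head = btrfs_select_ref_head(delayed_refs);
 *	if (head && !btrfs_delayed_ref_lock(delayed_refs, head)) {
 *		... run the refs queued on this head ...
 *		btrfs_delayed_ref_unlock(head);
 *	}
 */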
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);

void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes);
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

#endif