// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

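/*
 * Check whether the delayed refs reservation is running low.
 *
 * Returns true when the bytes we track as needed for pending delayed refs
 * (the rsv size) are no longer covered by what is actually reserved in the
 * delayed refs rsv plus the global reserve, i.e. callers should start
 * flushing delayed refs instead of piling up more.
 */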
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        bool ret = false;
        u64 reserved;

        spin_lock(&global_rsv->lock);
        reserved = global_rsv->reserved;
        spin_unlock(&global_rsv->lock);

        /*
         * Since the global reserve is just kind of magic we don't really want
         * to rely on it to save our bacon, so if our size is more than the
         * delayed_refs_rsv and the global rsv then it's time to think about
         * bailing.
         */
        spin_lock(&delayed_refs_rsv->lock);
        reserved += delayed_refs_rsv->reserved;
        if (delayed_refs_rsv->size >= reserved)
                ret = true;
        spin_unlock(&delayed_refs_rsv->lock);
        return ret;
}

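/*
 * Give a rough estimate of how long it would take to run the currently
 * queued delayed refs (number of entries times the average ref run time).
 *
 * Returns 1 when that estimate exceeds one second, 2 when it exceeds half a
 * second, and otherwise falls back to checking whether the delayed refs
 * rsv is running low.
 */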
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
        u64 num_entries =
                atomic_read(&trans->transaction->delayed_refs.num_entries);
        u64 avg_runtime;
        u64 val;

        smp_mb();
        avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
        val = num_entries * avg_runtime;
        if (val >= NSEC_PER_SEC)
                return 1;
        if (val >= NSEC_PER_SEC / 2)
                return 2;

        return btrfs_check_space_for_delayed_refs(trans->fs_info);
}

/**
 * btrfs_delayed_refs_rsv_release - release a ref head's reservation
 * @fs_info: the fs_info for our fs
 * @nr: the number of items to drop
 *
 * This drops the delayed ref head's count from the delayed refs rsv and frees
 * any excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
        u64 released = 0;

        released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
        if (released)
                trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                              0, released, 0);
}

/*
 * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
 * @trans: the trans that may have generated delayed refs
 *
 * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
 * it'll calculate the additional size and add it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
        u64 num_bytes;

        if (!trans->delayed_ref_updates)
                return;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info,
                                                    trans->delayed_ref_updates);
        spin_lock(&delayed_rsv->lock);
        delayed_rsv->size += num_bytes;
        delayed_rsv->full = 0;
        spin_unlock(&delayed_rsv->lock);
        trans->delayed_ref_updates = 0;
}

/**
 * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv
 * @fs_info: the fs info for our fs
 * @src: the source block rsv to transfer from
 * @num_bytes: the number of bytes to transfer
 *
 * This transfers up to the num_bytes amount from the src rsv to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
                                       struct btrfs_block_rsv *src,
                                       u64 num_bytes)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        u64 to_free = 0;

        spin_lock(&src->lock);
        src->reserved -= num_bytes;
        src->size -= num_bytes;
        spin_unlock(&src->lock);

        spin_lock(&delayed_refs_rsv->lock);
        if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
                u64 delta = delayed_refs_rsv->size -
                        delayed_refs_rsv->reserved;
                if (num_bytes > delta) {
                        to_free = num_bytes - delta;
                        num_bytes = delta;
                }
        } else {
                to_free = num_bytes;
                num_bytes = 0;
        }

        if (num_bytes)
                delayed_refs_rsv->reserved += num_bytes;
        if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
                delayed_refs_rsv->full = 1;
        spin_unlock(&delayed_refs_rsv->lock);

        if (num_bytes)
                trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                              0, num_bytes, 1);
        if (to_free)
                btrfs_space_info_free_bytes_may_use(fs_info,
                                delayed_refs_rsv->space_info, to_free);
}

/**
 * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage
 * @fs_info: the fs_info for our fs
 * @flush: control how we can flush for this reservation
 *
 * This will refill the delayed block_rsv up to one item's worth of space and
 * will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
                                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
        u64 num_bytes = 0;
        int ret = -ENOSPC;

        spin_lock(&block_rsv->lock);
        if (block_rsv->reserved < block_rsv->size) {
                num_bytes = block_rsv->size - block_rsv->reserved;
                num_bytes = min(num_bytes, limit);
        }
        spin_unlock(&block_rsv->lock);

        if (!num_bytes)
                return 0;

        ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
                                           num_bytes, flush);
        if (ret)
                return ret;
        btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
        trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                      0, num_bytes, 1);
        return 0;
}

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
                          struct btrfs_delayed_tree_ref *ref2)
{
        if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
                          struct btrfs_delayed_data_ref *ref2)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

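/*
 * Compare two delayed ref nodes belonging to the same head: order by type,
 * then by the per-type fields compared above, and optionally by sequence
 * number.  This is the sort order of the per-head ref rbtree.
 */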
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
                     struct btrfs_delayed_ref_node *ref2,
                     bool check_seq)
{
        int ret = 0;

        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
                ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
                                     btrfs_delayed_node_to_tree_ref(ref2));
        else
                ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
                                     btrfs_delayed_node_to_data_ref(ref2));
        if (ret)
                return ret;
        if (check_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        return 0;
}

/* Insert a new ref head into the head ref rbtree, or return the existing one. */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;
        bool leftmost = true;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->bytenr) {
                        p = &(*p)->rb_left;
                } else if (bytenr > entry->bytenr) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}

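/*
 * Insert a delayed ref node into the per-head ref rbtree, keyed by
 * comp_refs().  Returns the existing node if an equal one is already
 * present, or NULL after a successful insert.
 */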
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
                                                  struct btrfs_delayed_ref_node *ins)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *node = &ins->ref_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        bool leftmost = true;

        while (*p) {
                int comp;

                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 ref_node);
                comp = comp_refs(ins, entry, true);
                if (comp < 0) {
                        p = &(*p)->rb_left;
                } else if (comp > 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}

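/*
 * Return the delayed ref head with the lowest bytenr, or NULL if the tree
 * is empty.
 */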
static struct btrfs_delayed_ref_head *find_first_ref_head(
                struct btrfs_delayed_ref_root *dr)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = rb_first_cached(&dr->href_root);
        if (!n)
                return NULL;

        entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

        return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
                struct btrfs_delayed_ref_root *dr, u64 bytenr,
                bool return_bigger)
{
        struct rb_root *root = &dr->href_root.rb_root;
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                return NULL;
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                }
                return entry;
        }
        return NULL;
}

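/*
 * Try to take the mutex of a delayed ref head while holding the delayed refs
 * spinlock.  If the mutex is contended, the spinlock is dropped while we
 * block on the mutex and then retaken; -EAGAIN is returned if the head was
 * run and removed from the tree in the meantime, so the caller must retry.
 */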
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (RB_EMPTY_NODE(&head->href_node)) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref_head(head);
        return 0;
}

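/*
 * Remove a single delayed ref node from its head: unlink it from the ref
 * rbtree and the add_list, drop its reference and update the entry count.
 */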
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        lockdep_assert_held(&head->lock);
        rb_erase_cached(&ref->ref_node, &head->ref_tree);
        RB_CLEAR_NODE(&ref->ref_node);
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
}

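/*
 * Merge @ref with later nodes in the head's ref tree that describe the same
 * backref (ignoring seq), folding their ref_mod values into @ref and
 * dropping the duplicates.  Merging stops at the first node with
 * seq >= @seq so we never merge across a tree mod log barrier.  Returns
 * true when @ref itself went away (its ref_mod hit zero or it was swapped
 * out), telling the caller to restart its scan.
 */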
static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        struct rb_node *node = rb_next(&ref->ref_node);
        bool done = false;

        while (!done && node) {
                int mod;

                next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                node = rb_next(node);
                if (seq && next->seq >= seq)
                        break;
                if (comp_refs(ref, next, false))
                        break;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        }

        return done;
}

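/*
 * Walk all refs of a head and merge nodes that cancel each other out.  Refs
 * at or above the lowest active tree mod log sequence number are left alone
 * because backref walkers may still need to see them.  Data heads are
 * skipped, as they do not accumulate enough refs to make merging worthwhile.
 */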
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_ref_node *ref;
        struct rb_node *node;
        u64 seq = 0;

        lockdep_assert_held(&head->lock);

        if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        read_lock(&fs_info->tree_mod_log_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        read_unlock(&fs_info->tree_mod_log_lock);

again:
        for (node = rb_first_cached(&head->ref_tree); node;
             node = rb_next(node)) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                if (seq && ref->seq >= seq)
                        continue;
                if (merge_ref(trans, delayed_refs, head, ref, seq))
                        goto again;
        }
}

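/*
 * Return 1 if @seq is at or above the lowest sequence number still tracked
 * in the tree mod log, meaning the delayed ref carrying that seq must be
 * held back; return 0 when it is safe to process it.
 */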
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        read_lock(&fs_info->tree_mod_log_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        btrfs_debug(fs_info,
                                "holding back delayed_ref %#x.%x, lowest is %#x.%x",
                                (u32)(seq >> 32), (u32)seq,
                                (u32)(elem->seq >> 32), (u32)elem->seq);
                        ret = 1;
                }
        }

        read_unlock(&fs_info->tree_mod_log_lock);
        return ret;
}

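/*
 * Pick the next delayed ref head to process, starting from
 * run_delayed_start and wrapping around to the beginning of the tree once.
 * Heads already being processed are skipped; the returned head is marked as
 * processing and run_delayed_start is advanced past it.  Returns NULL when
 * there is nothing left to run.
 */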
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs)
{
        struct btrfs_delayed_ref_head *head;

again:
        head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
                             true);
        if (!head && delayed_refs->run_delayed_start != 0) {
                delayed_refs->run_delayed_start = 0;
                head = find_first_ref_head(delayed_refs);
        }
        if (!head)
                return NULL;

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (delayed_refs->run_delayed_start == 0)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->bytenr +
                head->num_bytes;
        return head;
}

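/*
 * Unlink a delayed ref head from the href rbtree and update the head
 * counters.  Both the delayed refs lock and the head's lock must be held.
 */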
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        lockdep_assert_held(&head->lock);

        rb_erase_cached(&head->href_node, &delayed_refs->href_root);
        RB_CLEAR_NODE(&head->href_node);
        atomic_dec(&delayed_refs->num_entries);
        delayed_refs->num_heads--;
        if (head->processing == 0)
                delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert a new ref node into the head's ref tree, or merge it with
 * an existing node for the same backref.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *root,
                              struct btrfs_delayed_ref_head *href,
                              struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        exist = tree_insert(&href->ref_tree, ref);
        if (!exist)
                goto inserted;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                        if (ref->action == BTRFS_ADD_DELAYED_REF)
                                list_add_tail(&exist->add_list,
                                              &href->ref_add_list);
                        else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                                ASSERT(!list_empty(&exist->add_list));
                                list_del(&exist->add_list);
                        } else {
                                ASSERT(0);
                        }
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;
inserted:
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_delayed_ref_head *existing,
                         struct btrfs_delayed_ref_head *update)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        u64 flags = btrfs_ref_head_to_space_flags(existing);
        int old_ref_mod;

        BUG_ON(existing->is_data != update->is_data);

        spin_lock(&existing->lock);
        if (update->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing->must_insert_reserved = update->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (update->extent_op) {
                if (!existing->extent_op) {
                        existing->extent_op = update->extent_op;
                } else {
                        if (update->extent_op->update_key) {
                                memcpy(&existing->extent_op->key,
                                       &update->extent_op->key,
                                       sizeof(update->extent_op->key));
                                existing->extent_op->update_key = true;
                        }
                        if (update->extent_op->update_flags) {
                                existing->extent_op->flags_to_set |=
                                        update->extent_op->flags_to_set;
                                existing->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(update->extent_op);
                }
        }
        /*
         * Update the reference mod on the head to reflect this new operation.
         * We only need the lock here because the head may currently be under
         * processing; for refs we just added we know we're OK.
         */
        old_ref_mod = existing->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing->is_data) {
                u64 csum_leaves =
                        btrfs_csum_bytes_to_leaves(fs_info,
                                                   existing->num_bytes);

                if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
                        delayed_refs->pending_csums -= existing->num_bytes;
                        btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
                }
                if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
                        delayed_refs->pending_csums += existing->num_bytes;
                        trans->delayed_ref_updates += csum_leaves;
                }
        }

        /*
         * This handles the following conditions:
         *
         * 1. We had a ref mod of 0 or more and went negative, indicating that
         *    we may be freeing space, so add our space to the
         *    total_bytes_pinned counter.
         * 2. We were negative and went to 0 or positive, so no longer can say
         *    that the space would be pinned, decrement our counter from the
         *    total_bytes_pinned counter.
         * 3. We are now at 0 and have ->must_insert_reserved set, which means
         *    this was a new allocation and then we dropped it, and thus must
         *    add our space to the total_bytes_pinned counter.
         */
        if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
                btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
        else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
                btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
        else if (existing->total_ref_mod == 0 && existing->must_insert_reserved)
                btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);

        spin_unlock(&existing->lock);
}

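/*
 * Initialize the fields of a newly allocated delayed ref head (and its
 * optional qgroup extent record) before it is inserted into the href tree.
 */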
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) struct btrfs_qgroup_extent_record *qrecord,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) u64 bytenr, u64 num_bytes, u64 ref_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) u64 reserved, int action, bool is_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) bool is_system)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) int count_mod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) int must_insert_reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /* If reserved is provided, it must be a data extent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) BUG_ON(!is_data && reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * The head node stores the sum of all the mods, so dropping a ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * should drop the sum in the head node by one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (action == BTRFS_UPDATE_DELAYED_HEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) count_mod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) else if (action == BTRFS_DROP_DELAYED_REF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) count_mod = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * accounting when the extent is finally added, or if a later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * modification deletes the delayed ref without ever inserting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * extent into the extent allocation tree. ref->must_insert_reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * is the flag used to record that accounting mods are required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * Once we record must_insert_reserved, switch the action to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * BTRFS_ADD_DELAYED_REF because other special casing is not required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (action == BTRFS_ADD_DELAYED_EXTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) must_insert_reserved = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) must_insert_reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) refcount_set(&head_ref->refs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) head_ref->bytenr = bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) head_ref->num_bytes = num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) head_ref->ref_mod = count_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) head_ref->must_insert_reserved = must_insert_reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) head_ref->is_data = is_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) head_ref->is_system = is_system;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) head_ref->ref_tree = RB_ROOT_CACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) INIT_LIST_HEAD(&head_ref->ref_add_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) RB_CLEAR_NODE(&head_ref->href_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) head_ref->processing = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) head_ref->total_ref_mod = count_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) spin_lock_init(&head_ref->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) mutex_init(&head_ref->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (qrecord) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (ref_root && reserved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) qrecord->data_rsv = reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) qrecord->data_rsv_refroot = ref_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) qrecord->bytenr = bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) qrecord->num_bytes = num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) qrecord->old_roots = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
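
/*
 * Illustrative sketch (not part of the original file): how the ref_mod
 * bookkeeping above plays out when two mods for the same extent land on one
 * head before it is run.  Assuming a plain add followed by a drop:
 *
 *	init_delayed_ref_head(head, NULL, bytenr, num_bytes, 0, 0,
 *			      BTRFS_ADD_DELAYED_REF, false, false);
 *	// head->ref_mod == 1, head->total_ref_mod == 1
 *
 *	// A later BTRFS_DROP_DELAYED_REF for the same bytenr is merged into
 *	// the existing head by update_existing_head_ref(), netting the head
 *	// back out to 0 by the time it is processed.
 */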
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * Helper function to actually insert a head node into the rbtree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * This does all the dirty work of maintaining the correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * overall modification count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) static noinline struct btrfs_delayed_ref_head *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) add_delayed_ref_head(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct btrfs_delayed_ref_head *head_ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct btrfs_qgroup_extent_record *qrecord,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) int action, int *qrecord_inserted_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct btrfs_delayed_ref_head *existing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct btrfs_delayed_ref_root *delayed_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) int qrecord_inserted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) delayed_refs = &trans->transaction->delayed_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* Record qgroup extent info if provided */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (qrecord) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) delayed_refs, qrecord))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) kfree(qrecord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) qrecord_inserted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) existing = htree_insert(&delayed_refs->href_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) &head_ref->href_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (existing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) update_existing_head_ref(trans, existing, head_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * we've updated the existing ref, free the newly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * allocated ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) head_ref = existing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) u64 flags = btrfs_ref_head_to_space_flags(head_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (head_ref->is_data && head_ref->ref_mod < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) delayed_refs->pending_csums += head_ref->num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) trans->delayed_ref_updates +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) btrfs_csum_bytes_to_leaves(trans->fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) head_ref->num_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (head_ref->ref_mod < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) head_ref->num_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) delayed_refs->num_heads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) delayed_refs->num_heads_ready++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) atomic_inc(&delayed_refs->num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) trans->delayed_ref_updates++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (qrecord_inserted_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *qrecord_inserted_ret = qrecord_inserted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return head_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
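
/*
 * Usage note with a minimal sketch (illustrative, not from the original
 * file): the head actually linked into the rbtree is the one returned, which
 * may differ from the head_ref passed in when an existing head was updated
 * and the new allocation freed.  Callers therefore reassign before using it:
 *
 *	spin_lock(&delayed_refs->lock);
 *	head_ref = add_delayed_ref_head(trans, head_ref, record, action,
 *					&qrecord_inserted);
 *	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
 *	spin_unlock(&delayed_refs->lock);
 */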
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * init_delayed_ref_common - Initialize the structure which represents a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * modification to an extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * @fs_info: The information structure of the mounted filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * @ref: The structure which is going to be initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * @bytenr: The logical address of the extent for which a modification is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * going to be recorded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * @num_bytes: Size of the extent whose modification is being recorded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * @ref_root: The id of the root where this modification has originated, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * can be either one of the well-known metadata trees or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * subvolume id which references this extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * @action: Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * BTRFS_ADD_DELAYED_EXTENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * @ref_type: Holds the type of the reference being recorded, can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * BTRFS_EXTENT_DATA_REF_KEY when recording a data extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct btrfs_delayed_ref_node *ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) u64 bytenr, u64 num_bytes, u64 ref_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int action, u8 ref_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) u64 seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (action == BTRFS_ADD_DELAYED_EXTENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) action = BTRFS_ADD_DELAYED_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (is_fstree(ref_root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) seq = atomic64_read(&fs_info->tree_mod_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) refcount_set(&ref->refs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ref->bytenr = bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ref->num_bytes = num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ref->ref_mod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ref->action = action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) ref->is_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ref->in_tree = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ref->seq = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ref->type = ref_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) RB_CLEAR_NODE(&ref->ref_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) INIT_LIST_HEAD(&ref->add_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * add a delayed tree ref. This does all of the accounting required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * to make sure the delayed ref is eventually processed before this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * transaction commits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct btrfs_ref *generic_ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct btrfs_delayed_extent_op *extent_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct btrfs_delayed_tree_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct btrfs_delayed_ref_head *head_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct btrfs_delayed_ref_root *delayed_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct btrfs_qgroup_extent_record *record = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) int qrecord_inserted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) bool is_system;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) int action = generic_ref->action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) int level = generic_ref->tree_ref.level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) u64 bytenr = generic_ref->bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u64 num_bytes = generic_ref->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) u64 parent = generic_ref->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) u8 ref_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) is_system = (generic_ref->real_root == BTRFS_CHUNK_TREE_OBJECTID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) BUG_ON(extent_op && extent_op->is_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (!ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!head_ref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) is_fstree(generic_ref->real_root) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) is_fstree(generic_ref->tree_ref.root) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) !generic_ref->skip_qgroup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) record = kzalloc(sizeof(*record), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (!record) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) ref_type = BTRFS_TREE_BLOCK_REF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) generic_ref->tree_ref.root, action, ref_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) ref->root = generic_ref->tree_ref.root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) ref->parent = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) ref->level = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) generic_ref->tree_ref.root, 0, action, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) is_system);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) head_ref->extent_op = extent_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) delayed_refs = &trans->transaction->delayed_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) spin_lock(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * insert both the head node and the new ref without dropping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * the spin lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) head_ref = add_delayed_ref_head(trans, head_ref, record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) action, &qrecord_inserted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) spin_unlock(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * Need to update the delayed_refs_rsv with any changes we may have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * made.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) btrfs_update_delayed_refs_rsv(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) action == BTRFS_ADD_DELAYED_EXTENT ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) BTRFS_ADD_DELAYED_REF : action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (qrecord_inserted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) btrfs_qgroup_trace_extent_post(fs_info, record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
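
/*
 * Example call sequence (a hedged sketch, not from this file): callers
 * normally build the struct btrfs_ref with the initializer helpers from
 * delayed-ref.h (e.g. btrfs_init_generic_ref()/btrfs_init_tree_ref(); check
 * that header for the exact signatures).  Spelled out with the fields this
 * function actually consumes:
 *
 *	struct btrfs_ref ref = {
 *		.type = BTRFS_REF_METADATA,
 *		.action = BTRFS_ADD_DELAYED_REF,
 *		.bytenr = bytenr,
 *		.len = fs_info->nodesize,
 *		.parent = parent,
 *		.real_root = root_objectid,
 *	};
 *
 *	ref.tree_ref.root = root_objectid;
 *	ref.tree_ref.level = level;
 *	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
 */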
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * Add a delayed data ref. It's similar to btrfs_add_delayed_tree_ref().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct btrfs_ref *generic_ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) u64 reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct btrfs_fs_info *fs_info = trans->fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct btrfs_delayed_data_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct btrfs_delayed_ref_head *head_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct btrfs_delayed_ref_root *delayed_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct btrfs_qgroup_extent_record *record = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) int qrecord_inserted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) int action = generic_ref->action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) u64 bytenr = generic_ref->bytenr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) u64 num_bytes = generic_ref->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) u64 parent = generic_ref->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) u64 ref_root = generic_ref->data_ref.ref_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) u64 owner = generic_ref->data_ref.ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) u64 offset = generic_ref->data_ref.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) u8 ref_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (!ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) ref_type = BTRFS_SHARED_DATA_REF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ref_type = BTRFS_EXTENT_DATA_REF_KEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) ref_root, action, ref_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) ref->root = ref_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) ref->parent = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ref->objectid = owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) ref->offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (!head_ref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) is_fstree(ref_root) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) is_fstree(generic_ref->real_root) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) !generic_ref->skip_qgroup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) record = kzalloc(sizeof(*record), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (!record) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) kmem_cache_free(btrfs_delayed_ref_head_cachep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) head_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) reserved, action, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) head_ref->extent_op = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) delayed_refs = &trans->transaction->delayed_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) spin_lock(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * insert both the head node and the new ref without dropping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * the spin lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) head_ref = add_delayed_ref_head(trans, head_ref, record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) action, &qrecord_inserted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) spin_unlock(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * Need to update the delayed_refs_rsv with any changes we may have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * made.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) btrfs_update_delayed_refs_rsv(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) action == BTRFS_ADD_DELAYED_EXTENT ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) BTRFS_ADD_DELAYED_REF : action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (qrecord_inserted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return btrfs_qgroup_trace_extent_post(fs_info, record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
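
/*
 * Corresponding data-ref sketch (illustrative; field names are taken from
 * the usage in this function, and callers normally go through
 * btrfs_init_data_ref() from delayed-ref.h instead):
 *
 *	struct btrfs_ref ref = {
 *		.type = BTRFS_REF_DATA,
 *		.action = BTRFS_ADD_DELAYED_REF,
 *		.bytenr = bytenr,
 *		.len = num_bytes,
 *		.real_root = root_objectid,
 *	};
 *
 *	ref.data_ref.ref_root = root_objectid;
 *	ref.data_ref.ino = ino;
 *	ref.data_ref.offset = file_offset;
 *	ret = btrfs_add_delayed_data_ref(trans, &ref, reserved);
 */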
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) u64 bytenr, u64 num_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct btrfs_delayed_extent_op *extent_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct btrfs_delayed_ref_head *head_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct btrfs_delayed_ref_root *delayed_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (!head_ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) head_ref->extent_op = extent_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) delayed_refs = &trans->transaction->delayed_refs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) spin_lock(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) spin_unlock(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * Need to update the delayed_refs_rsv with any changes we may have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * made.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) btrfs_update_delayed_refs_rsv(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
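
/*
 * Illustrative caller sketch (assumptions flagged inline): an extent_op is
 * allocated from btrfs_delayed_extent_op_cachep, typically through the
 * inline helpers declared in delayed-ref.h; the helper names and extent_op
 * field names below are assumed from that header and should be verified
 * there.
 *
 *	struct btrfs_delayed_extent_op *extent_op;
 *	int ret;
 *
 *	extent_op = btrfs_alloc_delayed_extent_op();	// assumed helper
 *	if (!extent_op)
 *		return -ENOMEM;
 *	extent_op->flags_to_set = flags;
 *	extent_op->update_flags = true;
 *	extent_op->update_key = false;
 *	extent_op->is_data = is_data;
 *	extent_op->level = level;
 *
 *	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
 *	if (ret)
 *		btrfs_free_delayed_extent_op(extent_op);	// assumed helper
 */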
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * This does a simple search for the head node for a given extent. Returns the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * head node if found, or NULL if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct btrfs_delayed_ref_head *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) lockdep_assert_held(&delayed_refs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return find_ref_head(delayed_refs, bytenr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
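
/*
 * Usage sketch (illustrative only): the lookup must run under the
 * delayed_refs lock, and a caller that wants to keep the head past the
 * unlock takes its own reference first:
 *
 *	delayed_refs = &trans->transaction->delayed_refs;
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
 *	if (head)
 *		refcount_inc(&head->refs);
 *	spin_unlock(&delayed_refs->lock);
 */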
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) void __cold btrfs_delayed_ref_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) int __init btrfs_delayed_ref_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) btrfs_delayed_ref_head_cachep = kmem_cache_create(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) "btrfs_delayed_ref_head",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) sizeof(struct btrfs_delayed_ref_head), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) SLAB_MEM_SPREAD, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (!btrfs_delayed_ref_head_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) btrfs_delayed_tree_ref_cachep = kmem_cache_create(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) "btrfs_delayed_tree_ref",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) sizeof(struct btrfs_delayed_tree_ref), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) SLAB_MEM_SPREAD, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (!btrfs_delayed_tree_ref_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) btrfs_delayed_data_ref_cachep = kmem_cache_create(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) "btrfs_delayed_data_ref",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) sizeof(struct btrfs_delayed_data_ref), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) SLAB_MEM_SPREAD, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!btrfs_delayed_data_ref_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) btrfs_delayed_extent_op_cachep = kmem_cache_create(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) "btrfs_delayed_extent_op",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) sizeof(struct btrfs_delayed_extent_op), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) SLAB_MEM_SPREAD, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (!btrfs_delayed_extent_op_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) btrfs_delayed_ref_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }